This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/pkg/sql/sqlbase.MakeIndexKeyPrefix. If you are wondering what MakeIndexKeyPrefix does, how to call it, or what real-world uses look like, the curated examples below should help.
The following presents 15 code examples of the MakeIndexKeyPrefix function, sorted by popularity by default.
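Before the examples, here is a minimal sketch of the recurring pattern they all share: MakeIndexKeyPrefix builds the key prefix for one index of a table, and PrefixEnd widens that prefix into a span covering every key of that index. This is a sketch, not CockroachDB source; the package name and the helper primaryIndexSpan are illustrative assumptions, and the descriptor is assumed to come from a lookup such as sqlbase.GetTableDescriptor, as shown in the examples below.

package example

import (
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// primaryIndexSpan (hypothetical helper) builds the key prefix for the table's
// primary index and widens it into a [prefix, prefix.PrefixEnd()) span that
// covers every row stored under that index.
func primaryIndexSpan(tableDesc *sqlbase.TableDescriptor) roachpb.Span {
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
	return roachpb.Span{Key: prefix, EndKey: prefix.PrefixEnd()}
}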
Example 1: getTableSpan
// getTableSpan returns a span stored at a checkpoint idx, or in the absence
// of a checkpoint, the span over all keys within a table.
func (sc *SchemaChanger) getTableSpan(mutationIdx int) (roachpb.Span, error) {
	var tableDesc *sqlbase.TableDescriptor
	if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		tableDesc, err = sqlbase.GetTableDescFromID(txn, sc.tableID)
		return err
	}); err != nil {
		return roachpb.Span{}, err
	}
	if len(tableDesc.Mutations) < mutationIdx {
		return roachpb.Span{},
			errors.Errorf("cannot find idx %d among %d mutations", mutationIdx, len(tableDesc.Mutations))
	}
	if mutationID := tableDesc.Mutations[mutationIdx].MutationID; mutationID != sc.mutationID {
		return roachpb.Span{},
			errors.Errorf("mutation index pointing to the wrong schema change, %d vs expected %d", mutationID, sc.mutationID)
	}
	resumeSpan := tableDesc.Mutations[mutationIdx].ResumeSpan
	if resumeSpan.Key != nil {
		return resumeSpan, nil
	}
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
	return roachpb.Span{
		Key: prefix,
		EndKey: prefix.PrefixEnd(),
	}, nil
}
Developer: knz, Project: cockroach, Lines: 29, Source file: backfill.go
Example 2: createTableReaders
// createTableReaders generates a plan consisting of table reader processors,
// one for each node that has spans that we are reading.
// overrideResultColumns is optional.
func (dsp *distSQLPlanner) createTableReaders(
	planCtx *planningCtx, n *scanNode, overrideResultColumns []uint32,
) (physicalPlan, error) {
	spec, err := initTableReaderSpec(n)
	if err != nil {
		return physicalPlan{}, err
	}
	if overrideResultColumns != nil {
		spec.OutputColumns = overrideResultColumns
	} else {
		spec.OutputColumns = getOutputColumnsFromScanNode(n)
	}
	planToStreamColMap := make([]int, len(n.resultColumns))
	for i := range planToStreamColMap {
		planToStreamColMap[i] = -1
	}
	for i, col := range spec.OutputColumns {
		planToStreamColMap[col] = i
	}
	ordering := dsp.convertOrdering(n.ordering.ordering, planToStreamColMap)

	spans := n.spans
	if len(n.spans) == 0 {
		// If no spans were specified retrieve all of the keys that start with our
		// index key prefix.
		start := roachpb.Key(sqlbase.MakeIndexKeyPrefix(&n.desc, n.index.ID))
		spans = roachpb.Spans{{Key: start, EndKey: start.PrefixEnd()}}
	}

	spanPartitions, err := dsp.partitionSpans(planCtx, spans)
	if err != nil {
		return physicalPlan{}, err
	}

	var p physicalPlan
	for _, sp := range spanPartitions {
		proc := processor{
			node: sp.node,
		}

		tr := &distsql.TableReaderSpec{}
		*tr = spec
		tr.Spans = make([]distsql.TableReaderSpan, len(sp.spans))
		for i := range sp.spans {
			tr.Spans[i].Span = sp.spans[i]
		}

		proc.spec.Core.SetValue(tr)
		proc.spec.Output = make([]distsql.OutputRouterSpec, 1)
		proc.spec.Output[0].Type = distsql.OutputRouterSpec_PASS_THROUGH

		pIdx := p.addProcessor(proc)
		p.resultRouters = append(p.resultRouters, pIdx)
		p.planToStreamColMap = planToStreamColMap
		p.ordering = ordering
	}
	return p, nil
}
Developer: jmptrader, Project: cockroach, Lines: 60, Source file: distsql_physical_planner.go
Example 3: init
func (tu *tableUpserter) init(txn *client.Txn) error {
	tu.txn = txn
	tu.tableDesc = tu.ri.helper.tableDesc
	tu.indexKeyPrefix = sqlbase.MakeIndexKeyPrefix(tu.tableDesc, tu.tableDesc.PrimaryIndex.ID)

	allColsIdentityExpr := len(tu.ri.insertCols) == len(tu.tableDesc.Columns) &&
		tu.evaler != nil && tu.evaler.isIdentityEvaler()
	if len(tu.tableDesc.Indexes) == 0 && allColsIdentityExpr {
		tu.fastPathBatch = tu.txn.NewBatch()
		tu.fastPathKeys = make(map[string]struct{})
		return nil
	}

	// TODO(dan): This could be made tighter, just the rows needed for the ON
	// CONFLICT exprs.
	requestedCols := tu.tableDesc.Columns

	if len(tu.updateCols) == 0 {
		tu.fetchCols = requestedCols
		tu.fetchColIDtoRowIndex = colIDtoRowIndexFromCols(requestedCols)
	} else {
		var err error
		tu.ru, err = makeRowUpdater(
			txn, tu.tableDesc, tu.fkTables, tu.updateCols, requestedCols, rowUpdaterDefault,
		)
		if err != nil {
			return err
		}
		// t.ru.fetchCols can also contain columns undergoing mutation.
		tu.fetchCols = tu.ru.fetchCols
		tu.fetchColIDtoRowIndex = tu.ru.fetchColIDtoRowIndex

		tu.updateColIDtoRowIndex = make(map[sqlbase.ColumnID]int)
		for i, updateCol := range tu.ru.updateCols {
			tu.updateColIDtoRowIndex[updateCol.ID] = i
		}
	}

	valNeededForCol := make([]bool, len(tu.fetchCols))
	for i, col := range tu.fetchCols {
		if _, ok := tu.fetchColIDtoRowIndex[col.ID]; ok {
			valNeededForCol[i] = true
		}
	}

	return tu.fetcher.Init(
		tu.tableDesc, tu.fetchColIDtoRowIndex, &tu.tableDesc.PrimaryIndex, false, false,
		tu.fetchCols, valNeededForCol)
}
Developer: BramGruneir, Project: cockroach, Lines: 49, Source file: tablewriter.go
Example 4: deleteAllRowsScan
func (td *tableDeleter) deleteAllRowsScan(
	ctx context.Context, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		tablePrefix := sqlbase.MakeIndexKeyPrefix(
			td.rd.helper.tableDesc, td.rd.helper.tableDesc.PrimaryIndex.ID)
		resume = roachpb.Span{Key: roachpb.Key(tablePrefix), EndKey: roachpb.Key(tablePrefix).PrefixEnd()}
	}
	valNeededForCol := make([]bool, len(td.rd.helper.tableDesc.Columns))
	for _, idx := range td.rd.fetchColIDtoRowIndex {
		valNeededForCol[idx] = true
	}

	var rf sqlbase.RowFetcher
	err := rf.Init(
		td.rd.helper.tableDesc, td.rd.fetchColIDtoRowIndex, &td.rd.helper.tableDesc.PrimaryIndex,
		false, false, td.rd.fetchCols, valNeededForCol)
	if err != nil {
		return resume, err
	}
	if err := rf.StartScan(td.txn, roachpb.Spans{resume}, true /* limit batches */, 0); err != nil {
		return resume, err
	}

	for i := int64(0); i < limit; i++ {
		row, err := rf.NextRowDecoded()
		if err != nil {
			return resume, err
		}
		if row == nil {
			// Done deleting all rows.
			resume = roachpb.Span{}
			break
		}
		_, err = td.row(ctx, row)
		if err != nil {
			return resume, err
		}
	}
	if resume.Key != nil {
		// Update the resume start key for the next iteration.
		resume.Key = rf.Key()
	}
	return resume, td.finalize(ctx)
}
Developer: BramGruneir, Project: cockroach, Lines: 45, Source file: tablewriter.go
Example 5: encodeIndexes
// encodeIndexes encodes the primary and secondary index keys. The
// secondaryIndexEntries are only valid until the next call to encodeIndexes or
// encodeSecondaryIndexes.
func (rh *rowHelper) encodeIndexes(
	colIDtoRowIndex map[sqlbase.ColumnID]int, values []parser.Datum,
) (primaryIndexKey []byte, secondaryIndexEntries []sqlbase.IndexEntry, err error) {
	if rh.primaryIndexKeyPrefix == nil {
		rh.primaryIndexKeyPrefix = sqlbase.MakeIndexKeyPrefix(rh.tableDesc,
			rh.tableDesc.PrimaryIndex.ID)
	}
	primaryIndexKey, _, err = sqlbase.EncodeIndexKey(
		rh.tableDesc, &rh.tableDesc.PrimaryIndex, colIDtoRowIndex, values, rh.primaryIndexKeyPrefix)
	if err != nil {
		return nil, nil, err
	}
	secondaryIndexEntries, err = rh.encodeSecondaryIndexes(colIDtoRowIndex, values)
	if err != nil {
		return nil, nil, err
	}
	return primaryIndexKey, secondaryIndexEntries, nil
}
Developer: BramGruneir, Project: cockroach, Lines: 21, Source file: rowwriter.go
Example 6: TestDropIndex
func TestDropIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const chunkSize = 200
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
			BackfillChunkSize: chunkSize,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	numRows := 2*chunkSize + 1
	createKVTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	status, i, err := tableDesc.FindIndexByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if status != sqlbase.DescriptorActive {
		t.Fatal("Index 'foo' is not active.")
	}
	indexPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.Indexes[i].ID))

	checkKeyCount(t, kvDB, indexPrefix, numRows)
	if _, err := sqlDB.Exec(`DROP INDEX t.kv@foo`); err != nil {
		t.Fatal(err)
	}
	checkKeyCount(t, kvDB, indexPrefix, 0)

	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	if _, _, err := tableDesc.FindIndexByName("foo"); err == nil {
		t.Fatalf("table descriptor still contains index after index is dropped")
	}
}
Developer: BramGruneir, Project: cockroach, Lines: 37, Source file: drop_test.go
Example 7: deleteIndexFast
func (td *tableDeleter) deleteIndexFast(
	ctx context.Context, idx *sqlbase.IndexDescriptor, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		indexPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td.rd.helper.tableDesc, idx.ID))
		resume = roachpb.Span{
			Key: indexPrefix,
			EndKey: indexPrefix.PrefixEnd(),
		}
	}

	if log.V(2) {
		log.Infof(ctx, "DelRange %s - %s", resume.Key, resume.EndKey)
	}
	td.b.DelRange(resume.Key, resume.EndKey, false /* returnKeys */)
	td.b.Header.MaxSpanRequestKeys = limit
	if err := td.finalize(ctx); err != nil {
		return resume, err
	}
	if l := len(td.b.Results); l != 1 {
		panic(fmt.Sprintf("%d results returned, expected 1", l))
	}
	return td.b.Results[0].ResumeSpan, nil
}
Developer: BramGruneir, Project: cockroach, Lines: 24, Source file: tablewriter.go
Example 8: restoreTable
// restoreTable inserts the given DatabaseDescriptor. If the name conflicts with
// an existing table, the one being restored is rekeyed with a new ID and the
// old data is deleted.
func restoreTable(
	ctx context.Context,
	db client.DB,
	database sqlbase.DatabaseDescriptor,
	table *sqlbase.TableDescriptor,
	ranges []sqlbase.BackupRangeDescriptor,
) error {
	if log.V(1) {
		log.Infof(ctx, "Restoring Table %q", table.Name)
	}

	var newTableID sqlbase.ID
	if err := db.Txn(ctx, func(txn *client.Txn) error {
		// Make sure there's a database with a name that matches the original.
		if _, err := getDescriptorID(txn, tableKey{name: database.Name}); err != nil {
			return errors.Wrapf(err, "a database named %q needs to exist to restore table %q",
				database.Name, table.Name)
		}

		// Assign a new ID for the table. TODO(dan): For now, we're always
		// generating a new ID, but varints get longer as they get bigger and so
		// our keys will, too. We should someday figure out how to overwrite an
		// existing table and steal its ID.
		var err error
		newTableID, err = GenerateUniqueDescID(txn)
		return err
	}); err != nil {
		return err
	}

	// Create the iteration keys before we give the table its new ID.
	tableStartKeyOld := roachpb.Key(sqlbase.MakeIndexKeyPrefix(table, table.PrimaryIndex.ID))
	tableEndKeyOld := tableStartKeyOld.PrefixEnd()

	// This loop makes restoring multiple tables O(N*M), where N is the number
	// of tables and M is the number of ranges. We could reduce this using an
	// interval tree if necessary.
	var wg sync.WaitGroup
	result := struct {
		syncutil.Mutex
		firstErr error
		numErrs int
	}{}
	for _, rangeDesc := range ranges {
		if len(rangeDesc.Path) == 0 {
			// Empty path means empty range.
			continue
		}

		intersectBegin, intersectEnd := IntersectHalfOpen(
			rangeDesc.StartKey, rangeDesc.EndKey, tableStartKeyOld, tableEndKeyOld)
		if intersectBegin != nil && intersectEnd != nil {
			// Write the data under the new ID.
			// TODO(dan): There's no SQL descriptors that point at this yet, so it
			// should be possible to remove it from the one txn this is all currently
			// run under. If we do that, make sure this data gets cleaned up on errors.
			wg.Add(1)
			go func(desc sqlbase.BackupRangeDescriptor) {
				for r := retry.StartWithCtx(ctx, base.DefaultRetryOptions()); r.Next(); {
					err := db.Txn(ctx, func(txn *client.Txn) error {
						return Ingest(ctx, txn, desc.Path, desc.CRC, intersectBegin, intersectEnd, newTableID)
					})
					if _, ok := err.(*client.AutoCommitError); ok {
						log.Errorf(ctx, "auto commit error during ingest: %s", err)
						// TODO(dan): Ingest currently does not rely on the
						// range being empty, but the plan is that it will. When
						// that change happens, this will have to delete any
						// partially ingested data or something.
						continue
					}

					if err != nil {
						log.Errorf(ctx, "%T %s", err, err)
						result.Lock()
						defer result.Unlock()
						// Record only the first error; keep counting the rest.
						if result.firstErr == nil {
							result.firstErr = err
						}
						result.numErrs++
					}
					break
				}
				wg.Done()
			}(rangeDesc)
		}
	}
	wg.Wait()

	// All concurrent accesses have finished, we don't need the lock anymore.
	if result.firstErr != nil {
		// This leaves the data that did get imported in case the user wants to
		// retry.
		// TODO(dan): Build tooling to allow a user to restart a failed restore.
		return errors.Wrapf(result.firstErr, "ingest encountered %d errors", result.numErrs)
	}

	table.ID = newTableID
	return db.Txn(ctx, func(txn *client.Txn) error {
		//......... portions of this code omitted .........
Developer: BramGruneir, Project: cockroach, Lines: 101, Source file: backup.go
Example 9: TestClusterFlow
func TestClusterFlow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
	tc := serverutils.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop()

	sumDigitsFn := func(row int) parser.Datum {
		sum := 0
		for row > 0 {
			sum += row % 10
			row /= 10
		}
		return parser.NewDInt(parser.DInt(sum))
	}

	sqlutils.CreateTable(t, tc.ServerConn(0), "t",
		"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
		numRows,
		sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	// Set up table readers on three hosts feeding data into a join reader on
	// the third host. This is a basic test for the distributed flow
	// infrastructure, including local and remote streams.
	//
	// Note that the ranges won't necessarily be local to the table readers, but
	// that doesn't matter for the purposes of this test.
	// Start a span (useful to look at spans using Lightstep).
	sp, err := tracing.JoinOrNew(tracing.NewTracer(), nil, "cluster test")
	if err != nil {
		t.Fatal(err)
	}
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	defer sp.Finish()

	tr1 := TableReaderSpec{
		Table: *desc,
		IndexIdx: 1,
		OutputColumns: []uint32{0, 1},
		Spans: []TableReaderSpan{makeIndexSpan(0, 8)},
	}

	tr2 := TableReaderSpec{
		Table: *desc,
		IndexIdx: 1,
		OutputColumns: []uint32{0, 1},
		Spans: []TableReaderSpan{makeIndexSpan(8, 12)},
	}

	tr3 := TableReaderSpec{
		Table: *desc,
		IndexIdx: 1,
		OutputColumns: []uint32{0, 1},
		Spans: []TableReaderSpan{makeIndexSpan(12, 100)},
	}

	jr := JoinReaderSpec{
		Table: *desc,
		OutputColumns: []uint32{2},
	}

	txn := client.NewTxn(ctx, *kvDB)
	fid := FlowID{uuid.MakeV4()}

	req1 := &SetupFlowRequest{Txn: txn.Proto}
	req1.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr1},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 0, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req2 := &SetupFlowRequest{Txn: txn.Proto}
	req2.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr2},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 1, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
					//......... portions of this code omitted .........
Developer: knz, Project: cockroach, Lines: 101, Source file: cluster_test.go
Example 10: makeIndexJoin
// makeIndexJoin builds an index join node.
// This destroys the original table scan node argument and reuses its
// storage to construct a new index scan node. A new table scan node
// is created separately as a member of the resulting index join node.
// The new index scan node is also returned alongside the new index join
// node.
func (p *planner) makeIndexJoin(
	origScan *scanNode, exactPrefix int,
) (resultPlan *indexJoinNode, indexScan *scanNode) {
	// Reuse the input argument's scanNode and its initialized parameters
	// as a starting point to build the new indexScan node.
	indexScan = origScan

	// Create a new scanNode that will be used with the primary index.
	table := p.Scan()
	table.desc = origScan.desc
	table.initDescDefaults(publicColumns)
	table.initOrdering(0)
	table.disableBatchLimit()

	colIDtoRowIndex := map[sqlbase.ColumnID]int{}
	for _, colID := range table.desc.PrimaryIndex.ColumnIDs {
		idx, ok := indexScan.colIdxMap[colID]
		if !ok {
			panic(fmt.Sprintf("Unknown column %d in PrimaryIndex!", colID))
		}
		colIDtoRowIndex[colID] = idx
	}
	for _, colID := range indexScan.index.ColumnIDs {
		idx, ok := indexScan.colIdxMap[colID]
		if !ok {
			panic(fmt.Sprintf("Unknown column %d in index!", colID))
		}
		colIDtoRowIndex[colID] = idx
	}

	// Transfer needed columns set to the table node.
	table.setNeededColumns(origScan.valNeededForCol)

	// For the index node, we need values for columns that are part of the index.
	// TODO(radu): we could reduce this further - we only need the PK columns plus
	// whatever filters may be used by the filter below.
	valNeededIndex := make([]bool, len(origScan.valNeededForCol))
	for _, idx := range colIDtoRowIndex {
		valNeededIndex[idx] = true
	}
	indexScan.setNeededColumns(valNeededIndex)

	if origScan.filter != nil {
		// Transfer the filter to the table node. We must first convert the
		// IndexedVars associated with indexNode.
		convFunc := func(expr parser.VariableExpr) (ok bool, newExpr parser.VariableExpr) {
			iv := expr.(*parser.IndexedVar)
			return true, table.filterVars.IndexedVar(iv.Idx)
		}
		table.filter = exprConvertVars(origScan.filter, convFunc)

		// Now we split the filter by extracting the part that can be evaluated using just the index
		// columns.
		splitFunc := func(expr parser.VariableExpr) (ok bool, newExpr parser.VariableExpr) {
			colIdx := expr.(*parser.IndexedVar).Idx
			if !indexScan.valNeededForCol[colIdx] {
				return false, nil
			}
			return true, indexScan.filterVars.IndexedVar(colIdx)
		}
		indexScan.filter, table.filter = splitFilter(table.filter, splitFunc)
	}

	indexScan.initOrdering(exactPrefix)

	primaryKeyPrefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(&table.desc, table.index.ID))

	return &indexJoinNode{
		index: indexScan,
		table: table,
		primaryKeyPrefix: primaryKeyPrefix,
		colIDtoRowIndex: colIDtoRowIndex,
	}, indexScan
}
Developer: BramGruneir, Project: cockroach, Lines: 80, Source file: index_join.go
Example 11: truncateAndBackfillColumnsChunk
	//......... portions of this code omitted .........
			table, err := sqlbase.GetTableDescFromID(txn, k)
			if err != nil {
				return err
			}
			fkTables[k] = tableLookup{table: table}
		}

		// TODO(dan): Tighten up the bound on the requestedCols parameter to
		// makeRowUpdater.
		requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(added))
		requestedCols = append(requestedCols, tableDesc.Columns...)
		requestedCols = append(requestedCols, added...)
		ru, err := makeRowUpdater(
			txn, tableDesc, fkTables, updateCols, requestedCols, rowUpdaterOnlyColumns,
		)
		if err != nil {
			return err
		}

		// TODO(dan): This check is an unfortunate bleeding of the internals of
		// rowUpdater. Extract the sql row to k/v mapping logic out into something
		// usable here.
		if !ru.isColumnOnlyUpdate() {
			panic("only column data should be modified, but the rowUpdater is configured otherwise")
		}

		// Run a scan across the table using the primary key. Running
		// the scan and applying the changes in many transactions is
		// fine because the schema change is in the correct state to
		// handle intermediate OLTP commands which delete and add
		// values during the scan.
		var rf sqlbase.RowFetcher
		colIDtoRowIndex := colIDtoRowIndexFromCols(tableDesc.Columns)
		valNeededForCol := make([]bool, len(tableDesc.Columns))
		for i := range valNeededForCol {
			_, valNeededForCol[i] = ru.fetchColIDtoRowIndex[tableDesc.Columns[i].ID]
		}
		if err := rf.Init(
			tableDesc, colIDtoRowIndex, &tableDesc.PrimaryIndex, false, false,
			tableDesc.Columns, valNeededForCol,
		); err != nil {
			return err
		}
		if err := rf.StartScan(
			txn, roachpb.Spans{sp}, true /* limit batches */, chunkSize,
		); err != nil {
			return err
		}

		oldValues := make(parser.DTuple, len(ru.fetchCols))
		writeBatch := txn.NewBatch()
		rowLength := 0
		var lastRowSeen parser.DTuple
		i := int64(0)
		for ; i < chunkSize; i++ {
			row, err := rf.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				break
			}
			lastRowSeen = row
			if nonNullViolationColumnName != "" {
				return sqlbase.NewNonNullViolationError(nonNullViolationColumnName)
			}
			copy(oldValues, row)
			// Update oldValues with NULL values where values weren't found;
			// only update when necessary.
			if rowLength != len(row) {
				rowLength = len(row)
				for j := rowLength; j < len(oldValues); j++ {
					oldValues[j] = parser.DNull
				}
			}
			if _, err := ru.updateRow(txn.Context, writeBatch, oldValues, updateValues); err != nil {
				return err
			}
		}
		if err := txn.Run(writeBatch); err != nil {
			return convertBackfillError(tableDesc, writeBatch)
		}
		if done = i < chunkSize; done {
			return nil
		}
		curIndexKey, _, err := sqlbase.EncodeIndexKey(
			tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, lastRowSeen,
			sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
		if err != nil {
			return err
		}
		resume := roachpb.Span{Key: roachpb.Key(curIndexKey).PrefixEnd(), EndKey: sp.EndKey}
		if err := sc.maybeWriteResumeSpan(txn, tableDesc, resume, mutationIdx, lastCheckpoint); err != nil {
			return err
		}
		nextKey = resume.Key
		return nil
	})
	return nextKey, done, err
}
Developer: knz, Project: cockroach, Lines: 101, Source file: backfill.go
Example 12: mainLoop
// mainLoop runs the mainLoop and returns any error.
// It does not close the output.
func (jr *joinReader) mainLoop() error {
	primaryKeyPrefix := sqlbase.MakeIndexKeyPrefix(&jr.desc, jr.index.ID)

	var alloc sqlbase.DatumAlloc
	spans := make(roachpb.Spans, 0, joinReaderBatchSize)

	ctx, span := tracing.ChildSpan(jr.ctx, "join reader")
	defer tracing.FinishSpan(span)

	txn := jr.flowCtx.setupTxn(ctx)

	log.VEventf(ctx, 1, "starting (filter: %s)", &jr.filter)
	if log.V(1) {
		defer log.Infof(ctx, "exiting")
	}

	for {
		// TODO(radu): figure out how to send smaller batches if the source has
		// a soft limit (perhaps send the batch out if we don't get a result
		// within a certain amount of time).
		for spans = spans[:0]; len(spans) < joinReaderBatchSize; {
			row, err := jr.input.NextRow()
			if err != nil {
				return err
			}
			if row == nil {
				if len(spans) == 0 {
					return nil
				}
				break
			}
			key, err := jr.generateKey(row, &alloc, primaryKeyPrefix)
			if err != nil {
				return err
			}

			spans = append(spans, roachpb.Span{
				Key: key,
				EndKey: key.PrefixEnd(),
			})
		}

		err := jr.fetcher.StartScan(txn, spans, false /* no batch limits */, 0)
		if err != nil {
			log.Errorf(ctx, "scan error: %s", err)
			return err
		}

		// TODO(radu): we are consuming all results from a fetch before starting
		// the next batch. We could start the next batch early while we are
		// outputting rows.
		for {
			outRow, err := jr.nextRow()
			if err != nil {
				return err
			}
			if outRow == nil {
				// Done.
				break
			}
			if log.V(3) {
				log.Infof(ctx, "pushing row %s", outRow)
			}
			// Push the row to the output RowReceiver; stop if they don't need more
			// rows.
			if !jr.output.PushRow(outRow) {
				log.VEventf(ctx, 1, "no more rows required")
				return nil
			}
		}

		if len(spans) != joinReaderBatchSize {
			// This was the last batch.
			return nil
		}
	}
}
Developer: BramGruneir, Project: cockroach, Lines: 79, Source file: joinreader.go
Example 13: TestTableReader
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	// | a | b | sum | s |
	// |-----------------------------------------------------------------|
	// | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |
	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	testCases := []struct {
		spec TableReaderSpec
		expected string
	}{
		{
			spec: TableReaderSpec{
				Filter: Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: TableReaderSpec{
				Filter: Expression{Expr: "@3 < 5 AND @2 != 3"},
				OutputColumns: []uint32{3}, // s
				HardLimit: 4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: TableReaderSpec{
				IndexIdx: 1,
				Reverse: true,
				Spans: []TableReaderSpan{makeIndexSpan(4, 6)},
				Filter: Expression{Expr: "@1 < 3"}, // sum < 8
				OutputColumns: []uint32{0, 1},
				SoftLimit: 1,
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		ts := c.spec
		ts.Table = *td

		flowCtx := FlowCtx{
			Context: context.Background(),
			evalCtx: &parser.EvalContext{},
			txnProto: &roachpb.Transaction{},
			clientDB: kvDB,
		}

		out := &RowBuffer{}
		tr, err := newTableReader(&flowCtx, &ts, out)
		if err != nil {
			t.Fatal(err)
		}
		tr.Run(nil)
		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s'", result, c.expected)
		}
	}
}
Developer: BramGruneir, Project: cockroach, Lines: 98, Source file: tablereader_test.go
Example 14: TestMakeSpans
	//......... portions of this code omitted .........
		{`(a = 5) OR (a, b) IN ((1, 1), (3, 3))`, `a,b`,
			`/1/1-/1/2 /3/3-/3/4 /5-/6`, `/5-/4 /3/3-/3/2 /1/1-/1/0`},
		// When encoding an end constraint for a maximal datum, we use
		// bytes.PrefixEnd() to go beyond the normal encodings of that datatype.
		{fmt.Sprintf(`a = %d`, math.MaxInt64), `a`,
			`/9223372036854775807-/<varint 9223372036854775808 overflows int64>`,
			`/9223372036854775807-/9223372036854775806`},
		{fmt.Sprintf(`a = %d`, math.MinInt64), `a`,
			`/-9223372036854775808-/-9223372036854775807`,
			`/-9223372036854775808-/<varint 9223372036854775808 overflows int64>`},
		{`(a, b) >= (1, 4)`, `a,b`, `/1/4-`, `-/1/3`},
		{`(a, b) > (1, 4)`, `a,b`, `/1/5-`, `-/1/4`},
		{`(a, b) < (1, 4)`, `a,b`, `/#-/1/4`, `/1/3-/#`},
		{`(a, b) <= (1, 4)`, `a,b`, `/#-/1/5`, `/1/4-/#`},
		{`(a, b) = (1, 4)`, `a,b`, `/1/4-/1/5`, `/1/4-/1/3`},
		{`(a, b) != (1, 4)`, `a,b`, `/#-`, `-/#`},
	}
	for _, d := range testData {
		for _, dir := range []encoding.Direction{encoding.Ascending, encoding.Descending} {
			var expected string
			if dir == encoding.Ascending {
				expected = d.expectedAsc
			} else {
				expected = d.expectedDesc
			}
			t.Run(d.expr+"~"+expected, func(t *testing.T) {
				sel := makeSelectNode(t)
				columns := strings.Split(d.columns, ",")
				dirs := make([]encoding.Direction, 0, len(columns))
				for range columns {
					dirs = append(dirs, dir)
				}
				desc, index := makeTestIndex(t, columns, dirs)
				constraints, _ := makeConstraints(t, d.expr, desc, index, sel)
				spans := makeSpans(constraints, desc, index)
				s := sqlbase.PrettySpans(spans, 2)
				s = keys.MassagePrettyPrintedSpanForTest(s, indexToDirs(index))
				if expected != s {
					t.Errorf("[index direction: %d] %s: expected %s, but found %s", dir, d.expr, expected, s)
				}
			})
		}
	}

	// Test indexes with mixed-directions (some cols Asc, some cols Desc) and other edge cases.
	testData2 := []struct {
		expr string
		columns string
		expected string
	}{
		{`a = 1 AND b = 5`, `a,b-,c`, `/1/5-/1/4`},
		{`a = 7 AND b IN (1,2,3) AND c = false`, `a,b-,c`,
			`/7/3/0-/7/3/1 /7/2/0-/7/2/1 /7/1/0-/7/1/1`},
		// Test different directions for the columns inside a tuple.
		{`(a,b,j) IN ((1,2,3), (4,5,6))`, `a-,b,j-`, `/4/5/6-/4/5/5 /1/2/3-/1/2/2`},
		{`k = b'\xff'`, `k`, `/"\xff"-/"\xff\x00"`},
		// Test that limits on bytes work correctly: when encoding a descending limit for bytes,
		// we need to go outside the bytes encoding.
		// "\xaa" is encoded as [bytesDescMarker, ^0xaa, <term escape sequence>]
		{`k = b'\xaa'`, `k-`,
			fmt.Sprintf("raw:%c%c\xff\xfe-%c%c\xff\xff",
				encoding.BytesDescMarker, ^byte(0xaa), encoding.BytesDescMarker, ^byte(0xaa))},
		// Ensure tuples with differing index directions aren't constrained.
		// TODO(mjibson): fix this, see #6346
		{`(a, b) >= (1, 4)`, `a-,b`, `-`},
		{`(a, b) >= (1, 4)`, `a,b-`, `-`},
	}
	for _, d := range testData2 {
		t.Run(d.expr+"~"+d.expected, func(t *testing.T) {
			sel := makeSelectNode(t)
			desc, index := makeTestIndexFromStr(t, d.columns)
			constraints, _ := makeConstraints(t, d.expr, desc, index, sel)
			spans := makeSpans(constraints, desc, index)
			var got string
			raw := false
			if strings.HasPrefix(d.expected, "raw:") {
				raw = true
				span := spans[0]
				d.expected = d.expected[4:]
				// Trim the index prefix from the span.
				prefix := string(sqlbase.MakeIndexKeyPrefix(desc, index.ID))
				got = strings.TrimPrefix(string(span.Key), prefix) + "-" +
					strings.TrimPrefix(string(span.EndKey), prefix)
			} else {
				got = keys.MassagePrettyPrintedSpanForTest(sqlbase.PrettySpans(spans, 2),
					indexToDirs(index))
			}
			if d.expected != got {
				if !raw {
					t.Errorf("%s: expected %s, but found %s", d.expr, d.expected, got)
				} else {
					t.Errorf("%s: expected %# x, but found %# x", d.expr, []byte(d.expected), got)
				}
			}
		})
	}
}
Developer: knz, Project: cockroach, Lines: 101, Source file: index_selection_test.go
Example 15: makeSpansForIndexConstraints
// makeSpansForIndexConstraints constructs the spans for an index given an
// instance of indexConstraints. The resulting spans are non-overlapping (by
// virtue of the input constraints being disjunct).
func makeSpansForIndexConstraints(
	constraints indexConstraints, tableDesc *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor,
) roachpb.Spans {
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, index.ID))

	// We have one constraint per column, so each contributes something
	// to the start and/or the end key of the span.
	// But we also have (...) IN <tuple> constraints that span multiple columns.
	// These constraints split each span, and that's how we can end up with
	// multiple spans.
	resultSpans := roachpb.Spans{{
		Key: append(roachpb.Key(nil), prefix...),
		EndKey: append(roachpb.Key(nil), prefix...),
	}}

	colIdx := 0
	for i, c := range constraints {
		// We perform special processing on the last end constraint to account for
		// the exclusive nature of the scan end key.
		lastEnd := (c.end != nil) &&
			(i+1 == len(constraints) || constraints[i+1].end == nil)

		// IN is handled separately.
		if (c.start != nil && c.start.Operator == parser.In) ||
			(c.end != nil && c.end.Operator == parser.In) {
			resultSpans = applyInConstraint(resultSpans, c, colIdx, index, lastEnd)
		} else {
			dir, err := index.ColumnDirections[colIdx].ToEncodingDirection()
			if err != nil {
				panic(err)
			}
			if c.start != nil {
				if dir == encoding.Ascending {
					encodeStartConstraintAscending(resultSpans, c.start)
				} else {
					encodeStartConstraintDescending(resultSpans, c.start)
				}
			}
			if c.end != nil {
				if dir == encoding.Ascending {
					encodeEndConstraintAscending(resultSpans, c.end, lastEnd)
				} else {
					encodeEndConstraintDescending(resultSpans, c.end, lastEnd)
				}
			}
		}
		colIdx += c.numColumns()
	}

	// If we had no end constraints, make it so that we scan the whole index.
	if len(constraints) == 0 || constraints[0].end == nil {
		for i := range resultSpans {
			resultSpans[i].EndKey = resultSpans[i].EndKey.PrefixEnd()
		}
	}

	// Remove any spans which are empty. This can happen for constraints such as
	// "a > 1 AND a < 2" which we do not simplify to false but which is treated
	// as "a >= 2 AND a < 2" for span generation.
	n := 0
	for _, s := range resultSpans {
		if bytes.Compare(s.Key, s.EndKey) < 0 {
			resultSpans[n] = s
			n++
		}
	}
	return resultSpans[:n]
}
Developer: veteranlu, Project: cockroach, Lines: 70, Source file: index_selection.go
Note: The github.com/cockroachdb/cockroach/pkg/sql/sqlbase.MakeIndexKeyPrefix examples in this article were collected from source-code and documentation hosting platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their authors, and the copyright of the source code remains with the original authors; consult the corresponding project's License before redistributing or using it. Do not reproduce this article without permission.