Golang base.NewSequenceClockImpl Function Code Examples

This article collects typical usage examples of the Golang function github.com/couchbase/sync_gateway/base.NewSequenceClockImpl. If you have been asking yourself what NewSequenceClockImpl does, how to call it, or what real-world uses look like, the hand-picked examples below should help.



A total of 19 code examples of the NewSequenceClockImpl function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
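
Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: construct a zero-valued clock with base.NewSequenceClockImpl(), record per-vbucket sequences, and read them back. The SetSequence, SetMaxSequence, and GetSequence calls are the same ones used in the examples below; the package main wrapper is purely illustrative, and the assumption that SetMaxSequence keeps the larger of the old and new sequence is inferred from Example 5 rather than confirmed by documentation.

package main

import (
	"fmt"

	"github.com/couchbase/sync_gateway/base"
)

func main() {
	// A freshly constructed clock reports sequence 0 for every vbucket.
	clock := base.NewSequenceClockImpl()

	// Record a sequence per vbucket (vbNo uint16, seq uint64), as the tests below do.
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)

	// Assumed behavior (see Example 5): only moves the vbucket's sequence forward.
	clock.SetMaxSequence(80, 25)

	fmt.Println(clock.GetSequence(50)) // 100
	fmt.Println(clock.GetSequence(80)) // 25, assuming SetMaxSequence keeps the larger value
}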

Example 1: GetStableClock

func (k *kvChangeIndexReader) GetStableClock() (clock base.SequenceClock, err error) {

	// Validate that the partition map is available.
	_, err = k.indexPartitionsCallback()
	if err != nil {
		// Unable to load partitions.  Check whether the index has data (stable counter is non-zero)
		count, err := base.LoadClockCounter(base.KStableSequenceKey, k.indexReadBucket)
		// Index has data, but we can't get partition map.  Return error
		if err == nil && count > 0 {
			return nil, errors.New("Error: Unable to retrieve index partition map, but index counter exists")
		} else {
			// Index doesn't have data.  Return zero clock as stable clock
			return base.NewSequenceClockImpl(), nil
		}
	}

	clock = base.NewSequenceClockImpl()
	stableShardedClock, err := k.loadStableSequence()
	if err != nil {
		base.Warn("Stable sequence and clock not found in index - returning err")
		return nil, err
	} else {
		clock = stableShardedClock.AsClock()
	}

	return clock, nil
}
Developer: paulharter | Project: sync_gateway | Lines: 27 | Source file: kv_change_index_reader.go


Example 2: parseClockSequenceID

func parseClockSequenceID(str string, sequenceHasher *sequenceHasher) (s SequenceID, err error) {

	if str == "" {
		return SequenceID{
			SeqType: ClockSequenceType,
			Clock:   base.NewSequenceClockImpl(),
		}, nil
	}

	s.SeqType = ClockSequenceType
	components := strings.Split(str, ":")
	if len(components) == 1 {
		// Convert simple zero to empty clock, to handle clients sending zero to mean 'no previous since'
		if components[0] == "0" {
			s.Clock = base.NewSequenceClockImpl()
		} else {
			// Standard clock hash
			if s.Clock, err = sequenceHasher.GetClock(components[0]); err != nil {
				return SequenceID{}, err
			}
		}
	} else if len(components) == 2 {
		// TriggeredBy Clock Hash, and vb.seq sequence
		if s.TriggeredByClock, err = sequenceHasher.GetClock(components[0]); err != nil {
			return SequenceID{}, err
		}
		sequenceComponents := strings.Split(components[1], ".")
		if len(sequenceComponents) != 2 {
			base.Warn("Unexpected sequence format - ignoring and relying on triggered by")
			return
		} else {
			if vb64, err := strconv.ParseUint(sequenceComponents[0], 10, 16); err != nil {
				base.Warn("Unable to convert sequence %v to int.", sequenceComponents[0])
			} else {
				s.vbNo = uint16(vb64)
				s.Seq, err = strconv.ParseUint(sequenceComponents[1], 10, 64)
			}
		}

	} else if len(components) == 3 {
		// Low hash, and vb.seq sequence.  Use low hash as clock, ignore vb.seq
		if s.Clock, err = sequenceHasher.GetClock(components[0]); err != nil {
			return SequenceID{}, err
		}

	} else {
		err = base.HTTPErrorf(400, "Invalid sequence")
	}

	if err != nil {
		err = base.HTTPErrorf(400, "Invalid sequence")
	}
	return s, err
}
Developer: joeljeske | Project: sync_gateway | Lines: 54 | Source file: sequence_id.go


Example 3: CouchbaseOnlyTestHashExpiry

// Tests hash expiry.  Requires a real couchbase server bucket - walrus doesn't support expiry yet
func CouchbaseOnlyTestHashExpiry(t *testing.T) {
	// Create a hasher with a small range (0-256) and short expiry for testing
	seqHasher, err := testSequenceHasher(8, 5)
	defer seqHasher.bucket.Close()
	assertNoError(t, err, "Error creating new sequence hasher")

	// Add first hash entry
	clock := base.NewSequenceClockImpl()
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)
	clock.SetSequence(150, 150)
	hashValue, err := seqHasher.GetHash(clock)
	assertNoError(t, err, "Error creating hash")
	// Validate that expiry is reset every time sequence for hash is requested.
	for i := 0; i < 20; i++ {
		clockBack, err := seqHasher.GetClock(hashValue)
		assertNoError(t, err, "Error getting clock")
		assert.Equals(t, clockBack.GetSequence(50), uint64(100))
		time.Sleep(2 * time.Second)
	}

	// Validate it disappears after expiry time when no active requests
	time.Sleep(10 * time.Second)
	clockBack, err := seqHasher.GetClock(hashValue)
	assertNoError(t, err, "Error getting clock")
	log.Println("Got clockback:", clockBack)
	assert.Equals(t, clockBack.GetSequence(50), uint64(0))

}
Developer: joeljeske | Project: sync_gateway | Lines: 30 | Source file: sequence_hasher_test.go


Example 4: TestConcurrentHashStorage

func TestConcurrentHashStorage(t *testing.T) {
	// Create a hasher with a small range (0-256) for testing
	seqHasher, err := testSequenceHasher(8, 0)
	defer seqHasher.bucket.Close()
	assertNoError(t, err, "Error creating new sequence hasher")

	// Simulate multiple processes writing hashes for different clocks concurrently - ensure cache is still valid
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			clock := base.NewSequenceClockImpl()
			clock.SetSequence(uint16(i), uint64(i))
			value, err := seqHasher.GetHash(clock)
			assertNoError(t, err, "Error getting hash")
			assert.Equals(t, value, fmt.Sprintf("%d-0", i))
		}(i)
	}
	wg.Wait()

	// Retrieve values
	for i := 0; i < 20; i++ {
		loadedClock, err := seqHasher.GetClock(fmt.Sprintf("%d-0", i))
		assertTrue(t, err == nil, "Shouldn't return error")
		assert.Equals(t, loadedClock.GetSequence(uint16(i)), uint64(i))
	}
}
Developer: joeljeske | Project: sync_gateway | Lines: 28 | Source file: sequence_hasher_test.go


Example 5: AddEntrySet

// Adds a set of log entries to the channel index, grouping them by block and returning the per-vbucket high sequences as a clock
func (b *BitFlagStorage) AddEntrySet(entries []*LogEntry) (clockUpdates base.SequenceClock, err error) {

	// Update the sequences in the appropriate cache block
	if len(entries) == 0 {
		return clockUpdates, nil
	}

	// The set of updates may be distributed over multiple partitions and blocks.
	// To support this, iterate over the set, and define groups of sequences by block
	// TODO: this approach feels like it's generating a lot of GC work.  Considered an iterative
	//       approach where a set update returned a list of entries that weren't targeted at the
	//       same block as the first entry in the list, but this would force sequential
	//       processing of the blocks.  Might be worth revisiting if we see high GC overhead.
	blockSets := make(BlockSet)
	clockUpdates = base.NewSequenceClockImpl()
	for _, entry := range entries {
		// Update the sequence in the appropriate cache block
		base.LogTo("DIndex+", "Add to channel index [%s], vbNo=%d, isRemoval:%v", b.channelName, entry.VbNo, entry.isRemoved())
		blockKey := GenerateBlockKey(b.channelName, entry.Sequence, b.partitions.VbMap[entry.VbNo])
		if _, ok := blockSets[blockKey]; !ok {
			blockSets[blockKey] = make([]*LogEntry, 0)
		}
		blockSets[blockKey] = append(blockSets[blockKey], entry)
		clockUpdates.SetMaxSequence(entry.VbNo, entry.Sequence)
	}

	err = b.writeBlockSetsWithCas(blockSets)
	if err != nil {
		base.Warn("Error writing blockSets with cas for block %s: %+v", blockSets, err)
	}

	return clockUpdates, err
}
Developer: joeljeske | Project: sync_gateway | Lines: 34 | Source file: kv_channel_storage.go


Example 6: getClockForMap

func getClockForMap(values map[uint16]uint64) base.SequenceClock {
	clock := base.NewSequenceClockImpl()
	for vb, seq := range values {
		clock.SetSequence(vb, seq)
	}
	return clock
}
Developer: couchbase | Project: sync_gateway | Lines: 7 | Source file: kv_dense_channel_storage_test.go


Example 7: readChangesOptionsFromJSON

func (h *handler) readChangesOptionsFromJSON(jsonData []byte) (feed string, options db.ChangesOptions, filter string, channelsArray []string, docIdsArray []string, compress bool, err error) {
	var input struct {
		Feed           string        `json:"feed"`
		Since          db.SequenceID `json:"since"`
		Limit          int           `json:"limit"`
		Style          string        `json:"style"`
		IncludeDocs    bool          `json:"include_docs"`
		Filter         string        `json:"filter"`
		Channels       string        `json:"channels"` // a filter query param, so it has to be a string
		DocIds         []string      `json:"doc_ids"`
		HeartbeatMs    *uint64       `json:"heartbeat"`
		TimeoutMs      *uint64       `json:"timeout"`
		AcceptEncoding string        `json:"accept_encoding"`
		ActiveOnly     bool          `json:"active_only"` // Return active revisions only
	}
	// Initialize since clock and hasher ahead of unmarshalling sequence
	if h.db != nil && h.db.SequenceType == db.ClockSequenceType {
		input.Since.Clock = base.NewSequenceClockImpl()
		input.Since.SeqType = h.db.SequenceType
		input.Since.SequenceHasher = h.db.SequenceHasher
	}
	if err = json.Unmarshal(jsonData, &input); err != nil {
		return
	}
	feed = input.Feed
	options.Since = input.Since
	options.Limit = input.Limit

	options.Conflicts = input.Style == "all_docs"
	options.ActiveOnly = input.ActiveOnly

	options.IncludeDocs = input.IncludeDocs
	filter = input.Filter

	if input.Channels != "" {
		channelsArray = strings.Split(input.Channels, ",")
	}

	docIdsArray = input.DocIds

	options.HeartbeatMs = getRestrictedInt(
		input.HeartbeatMs,
		kDefaultHeartbeatMS,
		kMinHeartbeatMS,
		h.server.config.MaxHeartbeat*1000,
		true,
	)

	options.TimeoutMs = getRestrictedInt(
		input.TimeoutMs,
		kDefaultTimeoutMS,
		0,
		kMaxTimeoutMS,
		true,
	)

	compress = (input.AcceptEncoding == "gzip")

	return
}
Developer: ethanfrey | Project: sync_gateway | Lines: 60 | Source file: changes_api.go


Example 8: getZeroSequence

func getZeroSequence(db *Database) ChangesOptions {
	if db.SequenceType == IntSequenceType {
		return ChangesOptions{Since: SequenceID{Seq: 0}}
	} else {
		return ChangesOptions{Since: SequenceID{Clock: base.NewSequenceClockImpl()}}
	}
}
Developer: paulharter | Project: sync_gateway | Lines: 7 | Source file: changes_test.go


Example 9: GetChanges

func (k *kvChangeIndexReader) GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error) {

	var sinceClock base.SequenceClock
	if options.Since.Clock == nil {
		// If there's no since clock, we may be in backfill for another channel - revert to the triggered by clock.
		if options.Since.TriggeredByClock != nil {
			sinceClock = options.Since.TriggeredByClock
		} else {
			sinceClock = base.NewSequenceClockImpl()
		}
	} else {
		sinceClock = options.Since.Clock
	}

	reader, err := k.getOrCreateReader(channelName, options)
	if err != nil {
		base.Warn("Error obtaining channel reader (need partition index?) for channel %s", channelName)
		return nil, err
	}
	changes, err := reader.getChanges(sinceClock)
	if err != nil {
		base.LogTo("DIndex+", "No clock found for channel %s, assuming no entries in index", channelName)
		return nil, nil
	}

	// Limit handling
	if options.Limit > 0 && len(changes) > options.Limit {
		limitResult := make([]*LogEntry, options.Limit)
		copy(limitResult[0:], changes[0:])
		return limitResult, nil
	}

	return changes, nil
}
Developer: paulharter | Project: sync_gateway | Lines: 34 | Source file: kv_change_index_reader.go


Example 10: simpleClockSequence

// Returns a clock-based SequenceID with all vb values set to seq
func simpleClockSequence(seq uint64) SequenceID {
	result := SequenceID{
		SeqType: ClockSequenceType,
		Clock:   base.NewSequenceClockImpl(),
	}
	for i := 0; i < 1024; i++ {
		result.Clock.SetSequence(uint16(i), seq)
	}
	return result
}
Developer: basotia | Project: sync_gateway | Lines: 11 | Source file: kv_change_index_test.go


Example 11: loadClock

func (k *kvChannelIndex) loadClock() {

	if k.clock == nil {
		k.clock = base.NewSequenceClockImpl()
	}
	data, cas, err := k.indexBucket.GetRaw(getChannelClockKey(k.channelName))
	if err != nil {
		base.LogTo("DIndex+", "Unable to find existing channel clock for channel %s - treating as new", k.channelName)
	}
	k.clock.Unmarshal(data)
	k.clock.SetCas(cas)
}
Developer: joeljeske | Project: sync_gateway | Lines: 12 | Source file: kv_channel_index.go


Example 12: GetClock

func (s *sequenceHasher) GetClock(sequence string) (base.SequenceClock, error) {

	clock := base.NewSequenceClockImpl()
	var err error
	var seqHash sequenceHash

	components := strings.Split(sequence, "-")
	if len(components) == 1 {
		seqHash.hashValue, err = strconv.ParseUint(sequence, 10, 64)
		if err != nil {
			return clock, errors.New(fmt.Sprintf("Error converting hash sequence %s to uint64: %v", sequence, err))
		}
	} else if len(components) == 2 {
		seqHash.hashValue, err = strconv.ParseUint(components[0], 10, 64)
		if err != nil {
			return clock, errors.New(fmt.Sprintf("Error converting hash sequence %s to uint64: %v", sequence, err))
		}
		index, err := strconv.ParseUint(components[1], 10, 16)
		seqHash.collisionIndex = uint16(index)
		if err != nil {
			return clock, errors.New(fmt.Sprintf("Error converting collision index %s to int: %v", components[1], err))
		}
	}

	cachedValue := s.getCacheValue(seqHash.hashValue)
	storedClocks, loadErr := cachedValue.load(s.loadClocks)
	if loadErr != nil {
		return clock, loadErr
	}

	if uint16(len(storedClocks.Sequences)) <= seqHash.collisionIndex {
		return clock, errors.New(fmt.Sprintf("Stored hash not found for sequence [%s], returning zero clock", sequence))
	}
	clock = base.NewSequenceClockImpl()
	clock.Init(storedClocks.Sequences[seqHash.collisionIndex], seqHash.String())
	return clock, nil

}
Developer: diesal11 | Project: sync_gateway | Lines: 38 | Source file: sequence_hasher.go


Example 13: TestHashCalculation

func TestHashCalculation(t *testing.T) {
	// Create a hasher with a small range (0-256) for testing
	seqHasher, err := testSequenceHasher(8, 0)
	defer seqHasher.bucket.Close()
	assertNoError(t, err, "Error creating new sequence hasher")
	clock := base.NewSequenceClockImpl()
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)
	clock.SetSequence(150, 150)
	hashValue := seqHasher.calculateHash(clock)
	assert.Equals(t, hashValue, uint64(14)) // (100 + 20 + 150) mod 256

	clock.SetSequence(55, 300)
	clock.SetSequence(200, 513)
	hashValue = seqHasher.calculateHash(clock)
	assert.Equals(t, hashValue, uint64(59)) // (100 + 20 + 150 + (300 mod 256) + (513 mod 256)) mod 256

}
Developer: joeljeske | Project: sync_gateway | Lines: 18 | Source file: sequence_hasher_test.go


Example 14: getChannelClock

func (k *kvChannelIndex) getChannelClock() (base.SequenceClock, error) {

	var channelClock base.SequenceClock
	var err error
	// If we're polling, return a copy
	k.lastPolledLock.RLock()
	defer k.lastPolledLock.RUnlock()
	if k.lastPolledChannelClock != nil {
		channelClock = base.NewSequenceClockImpl()
		channelClock.SetTo(k.lastPolledChannelClock)
	} else {
		channelClock, err = k.loadChannelClock()
		if err != nil {
			return nil, err
		}
	}
	return channelClock, nil

}
Developer: joeljeske | Project: sync_gateway | Lines: 19 | Source file: kv_channel_index.go


Example 15: getChanges

// Returns the set of index entries for the channel more recent than the
// specified since SequenceClock.  Index entries with sequence values greater than
// the index stable sequence are not returned.
func (k *kvChannelIndex) getChanges(since base.SequenceClock) ([]*LogEntry, error) {

	var results []*LogEntry

	// Someone is still interested in this channel - reset poll counts
	atomic.StoreUint32(&k.pollCount, 0)
	atomic.StoreUint32(&k.unreadPollCount, 0)

	chanClock, err := k.getChannelClock()
	if err != nil {
		// Note: gocb returns "Key not found.", go-couchbase returns "MCResponse status=KEY_ENOENT, opcode=GET, opaque=0, msg: Not found"
		// Using string matching to identify key not found for now - really need a better API in go-couchbase/gocb for gets that allows us to distinguish
		// between errors and key not found with something more robust than string matching.
		if IsNotFoundError(err) {
			// initialize chanClock as empty clock
			chanClock = base.NewSequenceClockImpl()
		} else {
			return results, err
		}
	}

	// If requested clock is later than the channel clock, return empty
	if since.AllAfter(chanClock) {
		base.LogTo("DIndex+", "requested clock is later than channel clock - no new changes to report")
		return results, nil
	}

	// If the since value is more recent than the last polled clock, return the results from the
	// last polling.  Has the potential to return values earlier than since and later than
	// lastPolledClock, but these duplicates will be ignored by replication.  Could validate
	// greater than since inside this if clause, but leaving out as a performance optimization for
	// now
	if lastPolledResults := k.checkLastPolled(since); len(lastPolledResults) > 0 {
		indexExpvars.Add("getChanges_lastPolled_hit", 1)
		return lastPolledResults, nil
	}
	indexExpvars.Add("getChanges_lastPolled_miss", 1)

	return k.channelStorage.GetChanges(since, chanClock)
}
Developer: joeljeske | Project: sync_gateway | Lines: 43 | Source file: kv_channel_index.go


Example 16: initializeChannelFeeds

// Creates a go-channel of ChangeEntry for each channel in channelsSince.  Each go-channel sends the ordered entries for that channel.
func (db *Database) initializeChannelFeeds(channelsSince channels.TimedSet, options ChangesOptions, addedChannels base.Set, userVbNo uint16) ([]<-chan *ChangeEntry, error) {
	// Populate the array of feed channels:
	feeds := make([]<-chan *ChangeEntry, 0, len(channelsSince))

	base.LogTo("Changes+", "GotChannelSince... %v", channelsSince)
	for name, vbSeqAddedAt := range channelsSince {
		seqAddedAt := vbSeqAddedAt.Sequence
		// If there's no vbNo on the channelsSince, it indicates a user doc channel grant - use the userVbNo.
		var vbAddedAt uint16
		if vbSeqAddedAt.VbNo == nil {
			vbAddedAt = userVbNo
		} else {
			vbAddedAt = *vbSeqAddedAt.VbNo
		}

		base.LogTo("Changes+", "Starting for channel... %s, %d", name, seqAddedAt)
		chanOpts := options

		// Check whether requires backfill based on addedChannels in this _changes feed
		isNewChannel := false
		if addedChannels != nil {
			_, isNewChannel = addedChannels[name]
		}

		// Three possible scenarios for backfill handling, based on whether the incoming since value indicates a backfill in progress
		// for this channel, and whether the channel requires a new backfill to be started
		//   Case 1. No backfill in progress, no backfill required - use the incoming since to get changes
		//   Case 2. No backfill in progress, backfill required for this channel.  Get changes since zero, backfilling to the incoming since
		//   Case 3. Backfill in progress.  Get changes since zero, backfilling to incoming triggered by, filtered to later than incoming since.
		backfillInProgress := false
		if options.Since.TriggeredByClock != nil {
			// There's a backfill in progress for SOME channel - check if it's this one
			if options.Since.TriggeredByClock.GetSequence(vbAddedAt) == seqAddedAt {
				backfillInProgress = true
			}
		}

		sinceSeq := getChangesClock(options.Since).GetSequence(vbAddedAt)
		backfillRequired := vbSeqAddedAt.Sequence > 0 && sinceSeq < seqAddedAt

		if isNewChannel || (backfillRequired && !backfillInProgress) {
			// Case 2.  No backfill in progress, backfill required
			base.LogTo("Changes+", "Starting backfill for channel... %s, %d", name, seqAddedAt)
			chanOpts.Since = SequenceID{
				Seq:              0,
				vbNo:             0,
				Clock:            base.NewSequenceClockImpl(),
				TriggeredBy:      seqAddedAt,
				TriggeredByVbNo:  vbAddedAt,
				TriggeredByClock: getChangesClock(options.Since).Copy(),
			}
		} else if backfillInProgress {
			// Case 3.  Backfill in progress.
			chanOpts.Since = SequenceID{
				Seq:              options.Since.Seq,
				vbNo:             options.Since.vbNo,
				Clock:            base.NewSequenceClockImpl(),
				TriggeredBy:      seqAddedAt,
				TriggeredByVbNo:  vbAddedAt,
				TriggeredByClock: options.Since.TriggeredByClock,
			}
		} else {
			// Case 1.  Leave chanOpts.Since set to options.Since.
		}
		feed, err := db.vectorChangesFeed(name, chanOpts)
		if err != nil {
			base.Warn("MultiChangesFeed got error reading changes feed %q: %v", name, err)
			return feeds, err
		}
		feeds = append(feeds, feed)
	}

	// If the user object has changed, create a special pseudo-feed for it:
	if db.user != nil {
		feeds, _ = db.appendVectorUserFeed(feeds, []string{}, options, userVbNo)
	}
	return feeds, nil
}
Developer: diesal11 | Project: sync_gateway | Lines: 79 | Source file: index_changes.go


Example 17: pollReaders

func (k *kvChangeIndexReader) pollReaders() bool {
	k.channelIndexReaderLock.Lock()
	defer k.channelIndexReaderLock.Unlock()

	if len(k.channelIndexReaders) == 0 {
		return true
	}

	// Build the set of clock keys to retrieve.  Stable sequence, plus one per channel reader
	keySet := make([]string, len(k.channelIndexReaders))
	index := 0
	for _, reader := range k.channelIndexReaders {
		keySet[index] = GetChannelClockKey(reader.channelName)
		index++
	}
	bulkGetResults, err := k.indexReadBucket.GetBulkRaw(keySet)

	if err != nil {
		base.Warn("Error retrieving channel clocks: %v", err)
	}
	IndexExpvars.Add("bulkGet_channelClocks", 1)
	IndexExpvars.Add("bulkGet_channelClocks_keyCount", int64(len(keySet)))
	changedChannels := make(chan string, len(k.channelIndexReaders))
	cancelledChannels := make(chan string, len(k.channelIndexReaders))

	var wg sync.WaitGroup
	for _, reader := range k.channelIndexReaders {
		// For each channel, unmarshal new channel clock, then check with reader whether this represents changes
		wg.Add(1)
		go func(reader *KvChannelIndex, wg *sync.WaitGroup) {
			defer func() {
				wg.Done()
			}()
			// Unmarshal channel clock.  If not present in the bulk get results, use empty clock to support
			// channels that don't have any indexed data yet.  If clock was previously found successfully (i.e. empty clock is
			// due to temporary error from server), empty clock treated safely as a non-update by pollForChanges.
			clockKey := GetChannelClockKey(reader.channelName)
			var newChannelClock *base.SequenceClockImpl
			clockBytes, found := bulkGetResults[clockKey]
			if !found {
				newChannelClock = base.NewSequenceClockImpl()
			} else {
				var err error
				newChannelClock, err = base.NewSequenceClockForBytes(clockBytes)
				if err != nil {
					base.Warn("Error unmarshalling channel clock - skipping polling for channel %s: %v", reader.channelName, err)
					return
				}
			}

			// Poll for changes
			hasChanges, cancelPolling := reader.pollForChanges(k.readerStableSequence.AsClock(), newChannelClock)
			if hasChanges {
				changedChannels <- reader.channelName
			}
			if cancelPolling {
				cancelledChannels <- reader.channelName
			}

		}(reader, &wg)
	}

	wg.Wait()
	close(changedChannels)
	close(cancelledChannels)

	// Build channel set from the changed channels
	var channels []string
	for channelName := range changedChannels {
		channels = append(channels, channelName)
	}

	if len(channels) > 0 && k.onChange != nil {
		k.onChange(base.SetFromArray(channels))
	}

	// Remove cancelled channels from channel readers
	for channelName := range cancelledChannels {
		IndexExpvars.Add("pollingChannels_active", -1)
		delete(k.channelIndexReaders, channelName)
	}

	return true
}
Developer: paulharter | Project: sync_gateway | Lines: 84 | Source file: kv_change_index_reader.go


Example 18: indexEntries

// Index a group of entries.  Iterates over the entry set to build updates per channel, then
// updates using channel index.
func (k *kvChangeIndexWriter) indexEntries(entries []*LogEntry, indexPartitions base.IndexPartitionMap, channelStorage ChannelStorage) error {

	channelSets := make(map[string][]*LogEntry)
	updatedSequences := base.NewSequenceClockImpl()

	// Wait group tracks when the current buffer has been completely processed
	var entryWg sync.WaitGroup
	entryErrorCount := uint32(0)
	// Iterate over entries to write index entry docs, and group entries for subsequent channel index updates
	for _, logEntry := range entries {
		// If principal, update the stable sequence and continue
		if logEntry.IsPrincipal {
			updatedSequences.SetSequence(logEntry.VbNo, logEntry.Sequence)
			continue
		}

		// Remove channels from entry to save space in memory, index entries
		ch := logEntry.Channels

		// Add index log entry if needed
		if channelStorage.StoresLogEntries() {
			entryWg.Add(1)
			go func(logEntry *LogEntry) {
				defer entryWg.Done()
				err := channelStorage.WriteLogEntry(logEntry)
				if err != nil {
					// Record the failure on the shared error counter.
					atomic.AddUint32(&entryErrorCount, 1)
				}
			}(logEntry)
		}
		// Collect entries by channel
		for channelName, removal := range ch {
			if removal == nil || removal.RevID == logEntry.RevID {
				// Store by channel and partition, to avoid having to iterate over results again in the channel index to group by partition
				_, found := channelSets[channelName]
				if !found {
					// TODO: maxCacheUpdate may be unnecessarily large memory allocation here
					channelSets[channelName] = make([]*LogEntry, 0, maxCacheUpdate)
				}
				if removal != nil {
					removalEntry := *logEntry
					removalEntry.Flags |= channels.Removed
					channelSets[channelName] = append(channelSets[channelName], &removalEntry)
				} else {
					channelSets[channelName] = append(channelSets[channelName], logEntry)
				}
			}
		}
		if EnableStarChannelLog {
			_, found := channelSets[channels.UserStarChannel]
			if !found {
				// TODO: maxCacheUpdate may be unnecessarily large memory allocation here
				channelSets[channels.UserStarChannel] = make([]*LogEntry, 0, maxCacheUpdate)
			}
			channelSets[channels.UserStarChannel] = append(channelSets[channels.UserStarChannel], logEntry)
		}

		// Track vbucket sequences for clock update
		updatedSequences.SetSequence(logEntry.VbNo, logEntry.Sequence)
	}

	// Wait group tracks when the current buffer has been completely processed
	var channelWg sync.WaitGroup

	channelErrorCount := uint32(0)
	// Iterate over channel sets to update channel index
	for channelName, entrySet := range channelSets {
		channelWg.Add(1)
		go func(channelName string, entrySet []*LogEntry) {
			defer channelWg.Done()
			err := k.addSetToChannelIndex(channelName, entrySet)
			if err != nil {
				// Record the failure on the shared error counter.
				atomic.AddUint32(&channelErrorCount, 1)
			}
		}(channelName, entrySet)
	}

	// Wait for entry and channel processing to complete
	entryWg.Wait()
	channelWg.Wait()
	if atomic.LoadUint32(&entryErrorCount) > 0 || atomic.LoadUint32(&channelErrorCount) > 0 {
		return errors.New("Unrecoverable error indexing entry or channel")
	}

	// Update stable sequence
	err := k.getWriterStableSequence().UpdateAndWrite(updatedSequences)
	return err
}
Developer: joeljeske | Project: sync_gateway | Lines: 90 | Source file: kv_change_index_writer.go


Example 19: TestHashStorage

func TestHashStorage(t *testing.T) {
	// Create a hasher with a small range (0-256) for testing
	seqHasher, err := testSequenceHasher(8, 0)
	defer seqHasher.bucket.Close()
	assertNoError(t, err, "Error creating new sequence hasher")

	// Add first hash entry
	clock := base.NewSequenceClockImpl()
	clock.SetSequence(50, 100)
	clock.SetSequence(80, 20)
	clock.SetSequence(150, 150)
	hashValue, err := seqHasher.GetHash(clock)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue, "14-0")

	// Add different hash entry
	clock2 := base.NewSequenceClockImpl()
	clock2.SetSequence(50, 1)
	clock2.SetSequence(80, 2)
	clock2.SetSequence(150, 5)
	hashValue2, err := seqHasher.GetHash(clock2)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue2, "8-0")

	// Retrieve first hash entry
	clockBack, err := seqHasher.GetClock(hashValue)
	assertNoError(t, err, "Error getting clock")
	assert.Equals(t, clockBack.GetSequence(50), uint64(100))
	assert.Equals(t, clockBack.GetSequence(80), uint64(20))
	assert.Equals(t, clockBack.GetSequence(150), uint64(150))

	// Create hash for the first clock again - ensure retrieves existing, and doesn't create new
	hashValue, err = seqHasher.GetHash(clock)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue, "14-0")

	// Add a second clock that hashes to the same value
	secondClock := base.NewSequenceClockImpl()
	secondClock.SetSequence(50, 100)
	secondClock.SetSequence(80, 20)
	secondClock.SetSequence(150, 150)
	secondClock.SetSequence(300, 256)
	hashValue, err = seqHasher.GetHash(secondClock)
	assertNoError(t, err, "Error getting hash")
	assert.Equals(t, hashValue, "14-1")

	// Simulate multiple processes requesting a hash for the same clock concurrently - ensures cas write checks
	// whether clock has already been added before writing
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			thirdClock := base.NewSequenceClockImpl()
			thirdClock.SetSequence(50, 100)
			thirdClock.SetSequence(80, 20)
			thirdClock.SetSequence(150, 150)
			thirdClock.SetSequence(300, 256)
			thirdClock.SetSequence(500, 256)
			value, err := seqHasher.GetHash(thirdClock)
			assertNoError(t, err, "Error getting hash")
			assert.Equals(t, value, "14-2")
		}()
	}
	wg.Wait()

	// Retrieve non-existent hash
	missingClock, err := seqHasher.GetClock("1234")
	assertTrue(t, err != nil, "Should return error for non-existent hash")
	assert.Equals(t, missingClock.GetSequence(50), uint64(0))
	assert.Equals(t, missingClock.GetSequence(80), uint64(0))
	assert.Equals(t, missingClock.GetSequence(150), uint64(0))
}
Developer: joeljeske | Project: sync_gateway | Lines: 73 | Source file: sequence_hasher_test.go



Note: The github.com/couchbase/sync_gateway/base.NewSequenceClockImpl examples in this article are compiled from source code and documentation hosted on GitHub/MSDocs and similar platforms. The code snippets are taken from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's License before distributing or using the code. Please do not reproduce this article without permission.

