This article collects typical usage examples of the Golang function github.com/couchbase/sync_gateway/base.LogTo. If you have been wondering how the Golang LogTo function is used in practice, or are looking for concrete LogTo examples, the hand-picked code samples below may help.
The following shows 20 code examples of the LogTo function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
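As a quick orientation before the examples: judging from the call sites below, base.LogTo takes a log-key string followed by a printf-style format string and arguments, much like log.Printf with an extra routing key. The following minimal sketch is an illustrative stand-in (logTo and everything else here is invented for the demo; it is not the real sync_gateway implementation) that shows the calling pattern the examples rely on.

package main

import "log"

// logTo mimics the calling convention of sync_gateway's base.LogTo as seen in the
// examples below: a log key selects a logging channel, followed by printf-style args.
// This is an illustrative stand-in, not the real implementation.
func logTo(key string, format string, args ...interface{}) {
    log.Printf("["+key+"] "+format, args...)
}

func main() {
    docID, revID := "doc1", "1-abc"
    // Same shape as the calls in the examples, e.g. base.LogTo("Shadow", "Pushing %q, rev %q", ...)
    logTo("Shadow", "Pushing %q, rev %q", docID, revID)
    logTo("Cache+", "Ignoring duplicate of #%d", 42)
}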
Example 1: PushRevision
// Saves a new local revision to the external bucket.
func (s *Shadower) PushRevision(doc *document) {
    defer func() { atomic.AddUint64(&s.pushCount, 1) }()
    if !s.docIDMatches(doc.ID) {
        return
    } else if doc.newestRevID() == doc.UpstreamRev {
        return // This revision was pulled from the external bucket, so don't push it back!
    }
    var err error
    if doc.Flags&channels.Deleted != 0 {
        base.LogTo("Shadow", "Pushing %q, rev %q [deletion]", doc.ID, doc.CurrentRev)
        err = s.bucket.Delete(doc.ID)
    } else {
        base.LogTo("Shadow", "Pushing %q, rev %q", doc.ID, doc.CurrentRev)
        body := doc.getRevision(doc.CurrentRev)
        if body == nil {
            base.Warn("Can't get rev %q.%q to push to external bucket", doc.ID, doc.CurrentRev)
            return
        }
        err = s.bucket.Set(doc.ID, 0, body)
    }
    if err != nil {
        base.Warn("Error pushing rev of %q to external bucket: %v", doc.ID, err)
    }
}
Developer: arjunblue, Project: sync_gateway, Lines: 26, Source: shadower.go
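Example 1 increments a push counter via defer plus sync/atomic so every call is counted regardless of which return path runs. A minimal standalone sketch of that pattern (the counter and function names here are invented for illustration, not taken from sync_gateway):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// pushCount is incremented once per call, no matter which return path runs,
// mirroring the defer/atomic pattern in PushRevision above.
var pushCount uint64

func pushRevision(matches bool) {
    defer func() { atomic.AddUint64(&pushCount, 1) }()
    if !matches {
        return // early return still counts as an attempt
    }
    // ... push work would go here ...
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            pushRevision(i%2 == 0)
        }(i)
    }
    wg.Wait()
    fmt.Println("attempts:", atomic.LoadUint64(&pushCount)) // attempts: 10
}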
Example 2: addEntries
// Adds a set of log entries to a block. Returns:
//   overflow       Entries that didn't fit in the block
//   pendingRemoval Entries with a parent that needs to be removed from the index,
//                  but the parent isn't in this block
func (d *DenseBlock) addEntries(entries []*LogEntry) (overflow []*LogEntry, pendingRemoval []*LogEntry, updateClock PartitionClock, err error) {
    blockFull := false
    partitionClock := make(PartitionClock)
    for i, entry := range entries {
        if !blockFull {
            removalRequired, err := d.addEntry(entry)
            base.LogTo("ChannelStorage+", "Adding entry to block. key:[%s] block:[%s] vb.seq:[%d.%d]", entry.DocID, d.Key, entry.VbNo, entry.Sequence)
            if err != nil {
                base.LogTo("ChannelStorage+", "Error adding entry to block. key:[%s] error:%v", entry.DocID, err)
                return nil, nil, nil, err
            }
            partitionClock.SetSequence(entry.VbNo, entry.Sequence)
            if removalRequired {
                if pendingRemoval == nil {
                    pendingRemoval = make([]*LogEntry, 0)
                }
                pendingRemoval = append(pendingRemoval, entry)
            }
            if len(d.value) > MaxBlockSize {
                blockFull = true
            }
        } else {
            overflow = entries[i:]
            break
        }
    }
    return overflow, pendingRemoval, partitionClock, nil
}
Developer: couchbase, Project: sync_gateway, Lines: 34, Source: kv_dense_block.go
Example 3: RemoveEntrySet
// Attempts to remove entries from the block
func (d *DenseBlock) RemoveEntrySet(entries []*LogEntry, bucket base.Bucket) (pendingRemoval []*LogEntry, err error) {
    pendingRemoval = d.removeEntries(entries)
    // If nothing was removed, don't update the block
    if len(pendingRemoval) == len(entries) {
        return entries, nil
    }
    casOut, writeErr := base.WriteCasRaw(bucket, d.Key, d.value, d.cas, 0, func(value []byte) (updatedValue []byte, err error) {
        // Note: The following is invoked upon cas failure - may be called multiple times
        d.value = value
        d.clock = nil
        pendingRemoval = d.removeEntries(entries)
        // If nothing was removed, cancel the write
        if len(pendingRemoval) == len(entries) {
            return nil, nil
        }
        return d.value, nil
    })
    if writeErr != nil {
        base.LogTo("ChannelStorage+", "Error writing block to database. %v", writeErr)
        return entries, writeErr
    }
    d.cas = casOut
    if len(pendingRemoval) != len(entries) {
        base.LogTo("ChannelStorage+", "Successfully removed set from block. key:[%s] #removed:[%d] #pending:[%d]",
            d.Key, len(entries)-len(pendingRemoval), len(pendingRemoval))
    }
    return pendingRemoval, nil
}
Developer: couchbase, Project: sync_gateway, Lines: 32, Source: kv_dense_block.go
Example 4: TakeDbOffline
func (dc *DatabaseContext) TakeDbOffline(reason string) error {
    base.LogTo("CRUD", "Taking Database : %v, offline", dc.Name)
    dbState := atomic.LoadUint32(&dc.State)
    // If the DB is already transitioning to offline, or is already offline, silently return
    if dbState == DBOffline || dbState == DBResyncing || dbState == DBStopping {
        return nil
    }
    if atomic.CompareAndSwapUint32(&dc.State, DBOnline, DBStopping) {
        // Notify all active _changes feeds to close
        close(dc.ExitChanges)
        base.LogTo("CRUD", "Waiting for all active calls to complete on Database : %v", dc.Name)
        // Block until all current calls have returned, including _changes feeds
        dc.AccessLock.Lock()
        defer dc.AccessLock.Unlock()
        base.LogTo("CRUD", "Database : %v, is offline", dc.Name)
        // Set DB state to Offline
        atomic.StoreUint32(&dc.State, DBOffline)
        if dc.EventMgr.HasHandlerForEvent(DBStateChange) {
            dc.EventMgr.RaiseDBStateChangeEvent(dc.Name, "offline", reason, *dc.Options.AdminInterface)
        }
        return nil
    } else {
        base.LogTo("CRUD", "Unable to take Database offline, database must be in Online state")
        return base.HTTPErrorf(http.StatusServiceUnavailable, "Unable to take Database offline, database must be in Online state")
    }
}
Developer: vladoatanasov, Project: sync_gateway, Lines: 32, Source: database.go
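Example 4's offline transition hinges on atomic.CompareAndSwapUint32: only the caller that wins the Online-to-Stopping swap proceeds to tear the database down, everyone else gets an error. A minimal sketch of that state-machine guard, using invented state constants rather than the real DBOnline/DBStopping values:

package main

import (
    "fmt"
    "sync/atomic"
)

const (
    stateOnline uint32 = iota
    stateStopping
    stateOffline
)

// takeOffline succeeds only for the caller that wins the Online -> Stopping swap,
// mirroring the guard used in TakeDbOffline above.
func takeOffline(state *uint32) bool {
    if !atomic.CompareAndSwapUint32(state, stateOnline, stateStopping) {
        return false // someone else is already transitioning, or we're not Online
    }
    // ... wait for in-flight calls to drain ...
    atomic.StoreUint32(state, stateOffline)
    return true
}

func main() {
    state := stateOnline
    fmt.Println(takeOffline(&state)) // true: this caller won the transition
    fmt.Println(takeOffline(&state)) // false: already offline
}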
Example 5: getOrCreateReader
func (k *kvChangeIndexReader) getOrCreateReader(channelName string, options ChangesOptions) (*KvChannelIndex, error) {
    // For continuous or longpoll processing, use the shared reader from the channelindexReaders map
    // to coordinate polling.
    if options.Wait {
        var err error
        index := k.getChannelReader(channelName)
        if index == nil {
            index, err = k.newChannelReader(channelName)
            IndexExpvars.Add("getOrCreateReader_create", 1)
            base.LogTo("DIndex+", "getOrCreateReader: Created new reader for channel %s", channelName)
        } else {
            IndexExpvars.Add("getOrCreateReader_get", 1)
            base.LogTo("DIndex+", "getOrCreateReader: Using existing reader for channel %s", channelName)
        }
        return index, err
    } else {
        // For non-continuous/non-longpoll, use a one-off reader, no onChange handling.
        indexPartitions, err := k.indexPartitionsCallback()
        if err != nil {
            return nil, err
        }
        return NewKvChannelIndex(channelName, k.indexReadBucket, indexPartitions, nil), nil
    }
}
Developer: paulharter, Project: sync_gateway, Lines: 26, Source: kv_change_index_reader.go
Example 6: readTapFeed
// Main loop that pulls changes from the external bucket. (Runs in its own goroutine.)
func (s *Shadower) readTapFeed() {
    vbucketsFilling := 0
    for event := range s.tapFeed.Events() {
        switch event.Opcode {
        case sgbucket.TapBeginBackfill:
            if vbucketsFilling == 0 {
                base.LogTo("Shadow", "Reading history of external bucket")
            }
            vbucketsFilling++
            //base.LogTo("Shadow", "Reading history of external bucket")
        case sgbucket.TapMutation, sgbucket.TapDeletion:
            key := string(event.Key)
            if !s.docIDMatches(key) {
                break
            }
            isDeletion := event.Opcode == sgbucket.TapDeletion
            if !isDeletion && event.Expiry > 0 {
                break // ignore ephemeral documents
            }
            err := s.pullDocument(key, event.Value, isDeletion, event.Sequence, event.Flags)
            if err != nil {
                base.Warn("Error applying change from external bucket: %v", err)
            }
            atomic.AddUint64(&s.pullCount, 1)
        case sgbucket.TapEndBackfill:
            if vbucketsFilling--; vbucketsFilling == 0 {
                base.LogTo("Shadow", "Caught up with history of external bucket")
            }
        }
    }
    base.LogTo("Shadow", "End of tap feed(?)")
}
Developer: arjunblue, Project: sync_gateway, Lines: 33, Source: shadower.go
Example 7: DocChanged
// Given a newly changed document (received from the tap feed), adds change entries to channels.
// The JSON must be the raw document from the bucket, with the metadata and all.
func (c *changeCache) DocChanged(docID string, docJSON []byte) {
    entryTime := time.Now()
    // ** This method does not directly access any state of c, so it doesn't lock.
    go func() {
        // Is this a user/role doc?
        if strings.HasPrefix(docID, auth.UserKeyPrefix) {
            c.processPrincipalDoc(docID, docJSON, true)
            return
        } else if strings.HasPrefix(docID, auth.RoleKeyPrefix) {
            c.processPrincipalDoc(docID, docJSON, false)
            return
        }
        // First unmarshal the doc (just its metadata, to save time/memory):
        doc, err := unmarshalDocumentSyncData(docJSON, false)
        if err != nil || !doc.hasValidSyncData() {
            base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
            return
        }
        if doc.Sequence <= c.initialSequence {
            return // Tap is sending us an old value from before I started up; ignore it
        }
        // Record a histogram of the Tap feed's lag:
        tapLag := time.Since(doc.TimeSaved) - time.Since(entryTime)
        lagMs := int(tapLag/(100*time.Millisecond)) * 100
        changeCacheExpvars.Add(fmt.Sprintf("lag-tap-%04dms", lagMs), 1)
        // If the doc update wasted any sequences due to conflicts, add empty entries for them:
        for _, seq := range doc.UnusedSequences {
            base.LogTo("Cache", "Received unused #%d for (%q / %q)", seq, docID, doc.CurrentRev)
            change := &LogEntry{
                Sequence:     seq,
                TimeReceived: time.Now(),
            }
            c.processEntry(change)
        }
        // Now add the entry for the new doc revision:
        change := &LogEntry{
            Sequence:     doc.Sequence,
            DocID:        docID,
            RevID:        doc.CurrentRev,
            Flags:        doc.Flags,
            TimeReceived: time.Now(),
            TimeSaved:    doc.TimeSaved,
            Channels:     doc.Channels,
        }
        base.LogTo("Cache", "Received #%d after %3dms (%q / %q)", change.Sequence, int(tapLag/time.Millisecond), change.DocID, change.RevID)
        changedChannels := c.processEntry(change)
        if c.onChange != nil && len(changedChannels) > 0 {
            c.onChange(changedChannels)
        }
    }()
}
Developer: guoyu07, Project: sync_gateway, Lines: 59, Source: change_cache.go
Example 8: prependChanges
// Prepends an array of entries to this one, skipping ones that I already have.
// The new array needs to overlap with my current log, i.e. must contain the same sequence as
// c.logs[0], otherwise nothing will be added because the method can't confirm that there are no
// missing sequences in between.
// Returns the number of entries actually prepended.
func (c *channelCache) prependChanges(changes LogEntries, changesValidFrom uint64, openEnded bool) int {
    c.lock.Lock()
    defer c.lock.Unlock()
    log := c.logs
    if len(log) == 0 {
        // If my cache is empty, just copy the new changes:
        if len(changes) > 0 {
            if !openEnded && changes[len(changes)-1].Sequence < c.validFrom {
                return 0 // changes might not go all the way to the current time
            }
            if excess := len(changes) - c.options.channelCacheMaxLength; excess > 0 {
                changes = changes[excess:]
                changesValidFrom = changes[0].Sequence
            }
            c.logs = make(LogEntries, len(changes))
            copy(c.logs, changes)
            base.LogTo("Cache", " Initialized cache of %q with %d entries from view (#%d--#%d)",
                c.channelName, len(changes), changes[0].Sequence, changes[len(changes)-1].Sequence)
        }
        c.validFrom = changesValidFrom
        c.addDocIDs(changes)
        return len(changes)
    } else if len(changes) == 0 {
        if openEnded && changesValidFrom < c.validFrom {
            c.validFrom = changesValidFrom
        }
        return 0
    } else {
        // Look for an overlap, and prepend everything up to that point:
        firstSequence := log[0].Sequence
        if changes[0].Sequence <= firstSequence {
            for i := len(changes) - 1; i >= 0; i-- {
                if changes[i].Sequence == firstSequence {
                    if excess := i + len(log) - c.options.channelCacheMaxLength; excess > 0 {
                        changes = changes[excess:]
                        changesValidFrom = changes[0].Sequence
                        i -= excess
                    }
                    if i > 0 {
                        newLog := make(LogEntries, 0, i+len(log))
                        newLog = append(newLog, changes[0:i]...)
                        newLog = append(newLog, log...)
                        c.logs = newLog
                        base.LogTo("Cache", " Added %d entries from view (#%d--#%d) to cache of %q",
                            i, changes[0].Sequence, changes[i-1].Sequence, c.channelName)
                    }
                    c.validFrom = changesValidFrom
                    return i
                }
            }
        }
        return 0
    }
}
Developer: arjunblue, Project: sync_gateway, Lines: 62, Source: channel_cache.go
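The overlap search in Example 8 walks the incoming slice backwards until it finds the sequence that matches the first entry already cached, then prepends only the entries before that point; if there is no overlap it adds nothing, because it cannot prove there are no gaps. A stripped-down sketch of that idea using plain uint64 sequences instead of the real LogEntries type (all names here are invented for illustration):

package main

import "fmt"

// prependOverlap prepends the portion of 'changes' that precedes the first
// sequence already present in 'cached'. It returns the merged slice and how
// many entries were prepended; with no overlap, nothing is added.
func prependOverlap(cached, changes []uint64) ([]uint64, int) {
    if len(cached) == 0 || len(changes) == 0 {
        return cached, 0
    }
    first := cached[0]
    for i := len(changes) - 1; i >= 0; i-- {
        if changes[i] == first {
            merged := append(append([]uint64{}, changes[:i]...), cached...)
            return merged, i
        }
    }
    return cached, 0 // no overlap: can't prove there are no gaps, so add nothing
}

func main() {
    cached := []uint64{7, 9, 12}
    changes := []uint64{3, 5, 7} // overlaps with the cache at sequence 7
    merged, n := prependOverlap(cached, changes)
    fmt.Println(merged, n) // [3 5 7 9 12] 2
}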
Example 9: pullDocument
// Gets an external document and applies it as a new revision to the managed document.
func (s *Shadower) pullDocument(key string, value []byte, isDeletion bool, cas uint64, flags uint32) error {
    var body Body
    if isDeletion {
        body = Body{"_deleted": true}
    } else {
        if err := json.Unmarshal(value, &body); err != nil {
            base.LogTo("Shadow", "Doc %q is not JSON; skipping", key)
            return nil
        }
    }
    db, _ := CreateDatabase(s.context)
    expiry, err := body.getExpiry()
    if err != nil {
        return base.HTTPErrorf(http.StatusBadRequest, "Invalid expiry: %v", err)
    }
    _, err = db.updateDoc(key, false, expiry, func(doc *document) (Body, AttachmentData, error) {
        // (Be careful: this block can be invoked multiple times if there are races!)
        if doc.UpstreamCAS != nil && *doc.UpstreamCAS == cas {
            return nil, nil, couchbase.UpdateCancel // we already have this doc revision
        }
        base.LogTo("Shadow+", "Pulling %q, CAS=%x ... have UpstreamRev=%q, UpstreamCAS=%x", key, cas, doc.UpstreamRev, doc.UpstreamCAS)
        // Compare this body to the current revision body to see if it's an echo:
        parentRev := doc.UpstreamRev
        newRev := doc.CurrentRev
        if !reflect.DeepEqual(body, doc.getRevision(newRev)) {
            // Nope, it's not. Assign it a new rev ID
            generation, _ := parseRevID(parentRev)
            newRev = createRevID(generation+1, parentRev, body)
        }
        doc.UpstreamRev = newRev
        doc.UpstreamCAS = &cas
        body["_rev"] = newRev
        if doc.History[newRev] == nil {
            // It's a new rev, so add it to the history:
            if parentRev != "" && !doc.History.contains(parentRev) {
                // The parent rev does not exist in the doc history, so set parentRev to "";
                // this will create a new conflicting branch in the revtree.
                base.Warn("Shadow: Adding revision as conflict branch, parent id %q is missing", parentRev)
                parentRev = ""
            }
            doc.History.addRevision(RevInfo{ID: newRev, Parent: parentRev, Deleted: isDeletion})
            base.LogTo("Shadow", "Pulling %q, CAS=%x --> rev %q", key, cas, newRev)
        } else {
            // We already have this rev; but don't cancel, because we do need to update the
            // doc's UpstreamRev/UpstreamCAS fields.
            base.LogTo("Shadow+", "Not pulling %q, CAS=%x (echo of rev %q)", key, cas, newRev)
        }
        return body, nil, nil
    })
    if err == couchbase.UpdateCancel {
        err = nil
    }
    return err
}
Developer: paulharter, Project: sync_gateway, Lines: 58, Source: shadower.go
Example 10: getOldRevisionJSON
// Looks up the raw JSON data of a revision that's been archived to a separate doc.
// If the revision isn't found (e.g. has been deleted by compaction), returns a 404 error.
func (db *DatabaseContext) getOldRevisionJSON(docid string, revid string) ([]byte, error) {
    data, _, err := db.Bucket.GetRaw(oldRevisionKey(docid, revid))
    if base.IsDocNotFoundError(err) {
        base.LogTo("CRUD+", "No old revision %q / %q", docid, revid)
        err = base.HTTPErrorf(404, "missing")
    }
    if data != nil {
        base.LogTo("CRUD+", "Got old revision %q / %q --> %d bytes", docid, revid, len(data))
    }
    return data, err
}
Developer: vladoatanasov, Project: sync_gateway, Lines: 13, Source: revision.go
Example 11: handleDbOnline
// Takes a DB online; first reloads the DB config
func (h *handler) handleDbOnline() error {
    h.assertAdminOnly()
    dbState := atomic.LoadUint32(&h.db.State)
    // If the DB is already transitioning to online, or is already online, silently return
    if dbState == db.DBOnline || dbState == db.DBStarting {
        return nil
    }
    // If the DB is currently re-syncing, return an error asking the user to retry later
    if dbState == db.DBResyncing {
        return base.HTTPErrorf(http.StatusServiceUnavailable, "Database _resync is in progress, this may take some time, try again later")
    }
    body, err := h.readBody()
    if err != nil {
        return err
    }
    var input struct {
        Delay int `json:"delay"`
    }
    input.Delay = kDefaultDBOnlineDelay
    json.Unmarshal(body, &input)
    base.LogTo("CRUD", "Taking Database : %v, online in %v seconds", h.db.Name, input.Delay)
    timer := time.NewTimer(time.Duration(input.Delay) * time.Second)
    go func() {
        <-timer.C
        // Take a write lock on the Database context, so that we can cycle the underlying Database
        // without any other call running concurrently
        h.db.AccessLock.Lock()
        defer h.db.AccessLock.Unlock()
        // We can only transition to Online from the Offline state
        if atomic.CompareAndSwapUint32(&h.db.State, db.DBOffline, db.DBStarting) {
            if _, err := h.server.ReloadDatabaseFromConfig(h.db.Name, true); err != nil {
                base.LogError(err)
                return
            }
            // Set DB state to DBOnline; this will cause new API requests to be accepted
            atomic.StoreUint32(&h.server.databases_[h.db.Name].State, db.DBOnline)
        } else {
            base.LogTo("CRUD", "Unable to take Database : %v, online after %v seconds, database must be in Offline state", h.db.Name, input.Delay)
        }
    }()
    return nil
}
Developer: joeljeske, Project: sync_gateway, Lines: 55, Source: admin_api.go
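Example 11 schedules the actual online transition on a timer so the HTTP handler can return immediately; when the timer fires, the goroutine re-checks the state with a compare-and-swap before doing any work. A compact sketch of that delayed-transition shape using time.AfterFunc (the state constants and flow here are simplified and invented for illustration):

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

const (
    dbOffline uint32 = iota
    dbStarting
    dbOnline
)

func main() {
    state := dbOffline
    delay := 1 * time.Second

    // Schedule the transition and return immediately, like handleDbOnline does.
    time.AfterFunc(delay, func() {
        // Only transition if we're still Offline when the timer fires.
        if atomic.CompareAndSwapUint32(&state, dbOffline, dbStarting) {
            // ... reload config here ...
            atomic.StoreUint32(&state, dbOnline)
        }
    })

    time.Sleep(delay + 100*time.Millisecond)          // wait for the demo timer
    fmt.Println("state:", atomic.LoadUint32(&state)) // state: 2 (online)
}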
Example 12: processEntry
// Handles a newly-arrived LogEntry.
func (c *changeCache) processEntry(change *LogEntry) base.Set {
    c.lock.Lock()
    defer c.lock.Unlock()
    if c.logsDisabled {
        return nil
    }
    sequence := change.Sequence
    nextSequence := c.nextSequence
    if _, found := c.receivedSeqs[sequence]; found {
        base.LogTo("Cache+", " Ignoring duplicate of #%d", sequence)
        return nil
    }
    c.receivedSeqs[sequence] = struct{}{}
    // FIX: c.receivedSeqs grows monotonically. Need a way to remove old sequences.
    var changedChannels base.Set
    if sequence == nextSequence || nextSequence == 0 {
        // This is the expected next sequence so we can add it now:
        changedChannels = c._addToCache(change)
        // Also add any pending sequences that are now contiguous:
        changedChannels = changedChannels.Union(c._addPendingLogs())
    } else if sequence > nextSequence {
        // There's a missing sequence (or several), so put this one on ice until it arrives:
        heap.Push(&c.pendingLogs, change)
        numPending := len(c.pendingLogs)
        base.LogTo("Cache", " Deferring #%d (%d now waiting for #%d...#%d)",
            sequence, numPending, nextSequence, c.pendingLogs[0].Sequence-1)
        changeCacheExpvars.Get("maxPending").(*base.IntMax).SetIfMax(int64(numPending))
        if numPending > c.options.CachePendingSeqMaxNum {
            // Too many pending; add the oldest one:
            changedChannels = c._addPendingLogs()
        }
    } else if sequence > c.initialSequence {
        // Out-of-order sequence received!
        // Remove from the skipped sequence queue
        if !c.WasSkipped(sequence) {
            // Error removing from skipped sequences
            base.LogTo("Cache", " Received unexpected out-of-order change - not in skippedSeqs (seq %d, expecting %d) doc %q / %q", sequence, nextSequence, change.DocID, change.RevID)
        } else {
            base.LogTo("Cache", " Received previously skipped out-of-order change (seq %d, expecting %d) doc %q / %q ", sequence, nextSequence, change.DocID, change.RevID)
            change.Skipped = true
        }
        changedChannels = c._addToCache(change)
        // Add to cache before removing from skipped, to ensure lowSequence doesn't get incremented
        // until results are available in the cache
        c.RemoveSkipped(sequence)
    }
    return changedChannels
}
Developer: mindhash, Project: sync_gateway, Lines: 53, Source: change_cache.go
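The "put this one on ice" branch in Example 12 buffers out-of-order sequences in a min-heap so the lowest pending sequence is always at the front, which is why the log line can report the waiting range from pendingLogs[0]. A self-contained sketch of that buffering technique with container/heap over plain uint64 sequences (this is not the real pendingLogs type):

package main

import (
    "container/heap"
    "fmt"
)

// seqHeap is a min-heap of sequence numbers, so the oldest pending sequence
// is always at index 0 - the same property processEntry relies on above.
type seqHeap []uint64

func (h seqHeap) Len() int            { return len(h) }
func (h seqHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h seqHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *seqHeap) Push(x interface{}) { *h = append(*h, x.(uint64)) }
func (h *seqHeap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    pending := &seqHeap{}
    for _, seq := range []uint64{9, 7, 12, 8} { // sequences arrive out of order
        heap.Push(pending, seq)
    }
    // Drain contiguously starting from the expected next sequence:
    next := uint64(7)
    for pending.Len() > 0 && (*pending)[0] == next {
        fmt.Println("caching #", heap.Pop(pending))
        next++
    }
    fmt.Println("still waiting for #", next, "...", (*pending)[0]-1) // #10 ... 11
}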
Example 13: handleOidcTestProviderAuthenticate
/*
 * This is not part of the OAuth 2.0 spec; it is used to handle the
 * user credentials entered in the login form.
 * Authenticates the user credentials POSTed from the web form
 * against the db users.
 * Returns an OAuth 2.0 Authorization Response.
 */
func (h *handler) handleOidcTestProviderAuthenticate() error {
    if !h.db.DatabaseContext.Options.UnsupportedOptions.OidcTestProvider.Enabled {
        return base.HTTPErrorf(http.StatusForbidden, "OIDC test provider is not enabled")
    }
    requestParams := h.rq.URL.Query()
    username := h.rq.FormValue("username")
    tokenttl, err := strconv.Atoi(h.rq.FormValue("tokenttl"))
    if err != nil {
        tokenttl = defaultIdTokenTTL
    }
    tokenDuration := time.Duration(tokenttl) * time.Second
    authenticated := h.rq.FormValue("authenticated")
    redirect_uri := requestParams.Get("redirect_uri")
    base.LogTo("OIDC+", "handleOidcTestProviderAuthenticate() called. username: %s authenticated: %s", username, authenticated)
    if username == "" || authenticated == "" {
        base.LogTo("OIDC+", "user did not enter valid credentials -- username or authenticated is empty")
        error := "?error=invalid_request&error_description=User failed authentication"
        h.setHeader("Location", requestParams.Get("redirect_uri")+error)
        h.response.WriteHeader(http.StatusFound)
        return nil
    }
    scope := requestParams.Get("scope")
    scopeMap := scopeStringToMap(scope)
    // Generate the return code by base64 encoding the username
    code := base64.StdEncoding.EncodeToString([]byte(username))
    authCodeTokenMap[username] = AuthState{CallbackURL: redirect_uri, TokenTTL: tokenDuration, Scopes: scopeMap}
    location_url, err := url.Parse(redirect_uri)
    if err != nil {
        return err
    }
    query := location_url.Query()
    query.Set("code", code)
    query.Set("state", "af0ifjsldkj")
    location_url.RawQuery = query.Encode()
    h.setHeader("Location", location_url.String())
    h.response.WriteHeader(http.StatusFound)
    return nil
}
Developer: paulharter, Project: sync_gateway, Lines: 58, Source: oidc_test_provider.go
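The tail of Example 13 builds the OAuth redirect by parsing the callback URL and attaching the code and state query parameters through net/url rather than string concatenation, which keeps the values correctly escaped. A minimal, standalone sketch of just that step (the redirect URI and username here are invented demo values):

package main

import (
    "encoding/base64"
    "fmt"
    "net/url"
)

func main() {
    redirectURI := "https://example.com/callback" // hypothetical client callback
    username := "alice"

    // Same trick as the example: the authorization code is simply the
    // base64-encoded username (it's a test provider, not real security).
    code := base64.StdEncoding.EncodeToString([]byte(username))

    locationURL, err := url.Parse(redirectURI)
    if err != nil {
        panic(err)
    }
    query := locationURL.Query()
    query.Set("code", code)
    query.Set("state", "af0ifjsldkj")
    locationURL.RawQuery = query.Encode()

    fmt.Println(locationURL.String())
    // https://example.com/callback?code=YWxpY2U%3D&state=af0ifjsldkj
}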
Example 14: AddBlock
// Creates a new block, and adds it to the block list
func (l *DenseBlockList) AddBlock() (*DenseBlock, error) {
    // Mark previous block inactive
    if l.activeBlock != nil {
        l.activeBlock.MarkInactive()
    }
    nextIndex := l.generateNextBlockIndex()
    var nextStartClock PartitionClock
    if l.activeBlock == nil {
        // No previous active block - new block list
        nextStartClock = make(PartitionClock)
    } else {
        // Determine index and startclock from previous active block
        nextStartClock = l.activeBlock.getCumulativeClock()
    }
    base.LogTo("ChannelStorage+", "Adding block to list. channel:[%s] partition:[%d] index:[%d]", l.channelName, l.partition, nextIndex)
    nextBlockKey := l.generateBlockKey(nextIndex)
    block := NewDenseBlock(nextBlockKey, nextStartClock)
    // Add the new block to the list
    listEntry := DenseBlockListEntry{
        BlockIndex: nextIndex,
        StartClock: nextStartClock,
    }
    l.blocks = append(l.blocks, listEntry)
    // Do a CAS-safe write of the active list
    value, err := l.marshalActive()
    if err != nil {
        return nil, err
    }
    casOut, err := l.indexBucket.WriteCas(l.activeKey, 0, 0, l.activeCas, value, sgbucket.Raw)
    if err != nil {
        // CAS error. If there's a concurrent writer for this partition, assume they have created the new block.
        // Re-initialize the current block list, and get the active block key from there.
        l.initDenseBlockList()
        if len(l.blocks) == 0 {
            return nil, fmt.Errorf("Unable to determine active block after DenseBlockList cas write failure")
        }
        latestEntry := l.blocks[len(l.blocks)-1]
        return NewDenseBlock(l.generateBlockKey(latestEntry.BlockIndex), latestEntry.StartClock), nil
    }
    l.activeCas = casOut
    l.activeBlock = block
    base.LogTo("ChannelStorage+", "Successfully added block to list. channel:[%s] partition:[%d] index:[%d]", l.channelName, l.partition, nextIndex)
    return block, nil
}
Developer: couchbase, Project: sync_gateway, Lines: 52, Source: kv_dense_channel_storage.go
Example 15: changesFeed
// Creates a Go-channel of all the changes made on a channel.
// Does NOT handle the Wait option. Does NOT check authorization.
func (db *Database) changesFeed(channel string, options ChangesOptions) (<-chan *ChangeEntry, error) {
    dbExpvars.Add("channelChangesFeeds", 1)
    log, err := db.changeCache.GetChanges(channel, options)
    base.LogTo("DIndex+", "[changesFeed] Found %d changes for channel %s", len(log), channel)
    if err != nil {
        return nil, err
    }
    if len(log) == 0 {
        // There are no entries newer than 'since'. Return an empty feed:
        feed := make(chan *ChangeEntry)
        close(feed)
        return feed, nil
    }
    feed := make(chan *ChangeEntry, 1)
    go func() {
        defer close(feed)
        // Now write each log entry to the 'feed' channel in turn:
        for _, logEntry := range log {
            if !options.Conflicts && (logEntry.Flags&channels.Hidden) != 0 {
                //continue  // FIX: had to comment this out.
                // This entry is shadowed by a conflicting one. We would like to skip it.
                // The problem is that if this is the newest revision of this doc, then the
                // doc will appear under this sequence # in the changes view, which means
                // we won't emit the doc at all because we already stopped emitting entries
                // from the view before this point.
            }
            if logEntry.Sequence >= options.Since.TriggeredBy {
                options.Since.TriggeredBy = 0
            }
            seqID := SequenceID{
                Seq:         logEntry.Sequence,
                TriggeredBy: options.Since.TriggeredBy,
            }
            change := makeChangeEntry(logEntry, seqID, channel)
            base.LogTo("Changes+", "Sending seq:%v from channel %s", seqID, channel)
            select {
            case <-options.Terminator:
                base.LogTo("Changes+", "Aborting changesFeed")
                return
            case feed <- &change:
            }
        }
    }()
    return feed, nil
}
Developer: basotia, Project: sync_gateway, Lines: 51, Source: changes.go
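Example 15's feed goroutine writes each entry to the output channel inside a select so it can stop as soon as a terminator channel is closed, instead of blocking forever on a reader that has gone away. A reduced sketch of that producer pattern with plain ints (the feed/terminator names are kept, but everything else is simplified and invented):

package main

import "fmt"

// changesFeed streams entries into the returned channel until either the
// entries are exhausted or the terminator channel is closed.
func changesFeed(entries []int, terminator <-chan struct{}) <-chan int {
    feed := make(chan int, 1)
    go func() {
        defer close(feed)
        for _, e := range entries {
            select {
            case <-terminator:
                return // caller went away; stop producing
            case feed <- e:
            }
        }
    }()
    return feed
}

func main() {
    terminator := make(chan struct{})
    feed := changesFeed([]int{1, 2, 3}, terminator)
    for e := range feed {
        fmt.Println("seq:", e)
        if e == 2 {
            close(terminator) // abort the feed early
        }
    }
    // Depending on timing, seq 3 may or may not be delivered after the abort.
}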
Example 16: GetChanges
func (k *kvChangeIndexReader) GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error) {
    var sinceClock base.SequenceClock
    if options.Since.Clock == nil {
        // If there's no since clock, we may be in backfill for another channel - revert to the triggered-by clock.
        if options.Since.TriggeredByClock != nil {
            sinceClock = options.Since.TriggeredByClock
        } else {
            sinceClock = base.NewSequenceClockImpl()
        }
    } else {
        sinceClock = options.Since.Clock
    }
    reader, err := k.getOrCreateReader(channelName, options)
    if err != nil {
        base.Warn("Error obtaining channel reader (need partition index?) for channel %s", channelName)
        return nil, err
    }
    changes, err := reader.getChanges(sinceClock)
    if err != nil {
        base.LogTo("DIndex+", "No clock found for channel %s, assuming no entries in index", channelName)
        return nil, nil
    }
    // Limit handling
    if options.Limit > 0 && len(changes) > options.Limit {
        limitResult := make([]*LogEntry, options.Limit)
        copy(limitResult[0:], changes[0:])
        return limitResult, nil
    }
    return changes, nil
}
Developer: paulharter, Project: sync_gateway, Lines: 34, Source: kv_change_index_reader.go
Example 17: processPrincipalDoc
func (c *changeCache) processPrincipalDoc(docID string, docJSON []byte, isUser bool) {
    // Currently the cache isn't really doing much with user docs; mostly it needs to know about
    // them because they have sequence numbers, so without them the sequence of sequences would
    // have gaps in it, causing later sequences to get stuck in the queue.
    princ, err := c.unmarshalPrincipal(docJSON, isUser)
    if princ == nil {
        base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
        return
    }
    sequence := princ.Sequence()
    c.lock.RLock()
    initialSequence := c.initialSequence
    c.lock.RUnlock()
    if sequence <= initialSequence {
        return // Tap is sending us an old value from before I started up; ignore it
    }
    // Now add the (somewhat fictitious) entry:
    change := &LogEntry{
        Sequence:     sequence,
        TimeReceived: time.Now(),
    }
    if isUser {
        change.DocID = "_user/" + princ.Name()
    } else {
        change.DocID = "_role/" + princ.Name()
    }
    base.LogTo("Cache", "Received #%d (%q)", change.Sequence, change.DocID)
    changedChannels := c.processEntry(change)
    if c.onChange != nil && len(changedChannels) > 0 {
        c.onChange(changedChannels)
    }
}
Developer: paulharter, Project: sync_gateway, Lines: 35, Source: change_cache.go
Example 18: setOldRevisionJSON
func (db *Database) setOldRevisionJSON(docid string, revid string, body []byte) error {
    base.LogTo("CRUD+", "Saving old revision %q / %q (%d bytes)", docid, revid, len(body))
    // Set old revisions to expire after 5 minutes. A future enhancement to make this a config
    // setting might be appropriate.
    return db.Bucket.SetRaw(oldRevisionKey(docid, revid), 300, body)
}
Developer: vladoatanasov, Project: sync_gateway, Lines: 7, Source: revision.go
Example 19: backupAncestorRevs
// Moves a revision's ancestor's body out of the document object and into a separate db doc.
func (db *Database) backupAncestorRevs(doc *document, revid string) error {
    // Find an ancestor that still has JSON in the document:
    var json []byte
    for {
        if revid = doc.History.getParent(revid); revid == "" {
            return nil // No ancestors with JSON found
        } else if json = doc.getRevisionJSON(revid); json != nil {
            break
        }
    }
    // Store the JSON as a separate doc in the bucket:
    if err := db.setOldRevisionJSON(doc.ID, revid, json); err != nil {
        // This isn't fatal since we haven't lost any information; just warn about it.
        base.Warn("backupAncestorRevs failed: doc=%q rev=%q err=%v", doc.ID, revid, err)
        return err
    }
    // Nil out the rev's body in the document struct:
    if revid == doc.CurrentRev {
        doc.body = nil
    } else {
        doc.History.setRevisionBody(revid, nil)
    }
    base.LogTo("CRUD+", "Backed up obsolete rev %q/%q", doc.ID, revid)
    return nil
}
Developer: mindhash, Project: sync_gateway, Lines: 28, Source: crud.go
Example 20: updateAccess
// Updates a document's channel/role UserAccessMap with new access settings from an AccessMap.
// Returns an array of the user/role names whose access has changed as a result.
func (accessMap *UserAccessMap) updateAccess(doc *document, newAccess channels.AccessMap) (changedUsers []string) {
    // Update users already appearing in doc.Access:
    for name, access := range *accessMap {
        if access.UpdateAtSequence(newAccess[name], doc.Sequence) {
            if len(access) == 0 {
                delete(*accessMap, name)
            }
            changedUsers = append(changedUsers, name)
        }
    }
    // Add new users who are in newAccess but not accessMap:
    for name, access := range newAccess {
        if _, existed := (*accessMap)[name]; !existed {
            if *accessMap == nil {
                *accessMap = UserAccessMap{}
            }
            (*accessMap)[name] = channels.AtSequence(access, doc.Sequence)
            changedUsers = append(changedUsers, name)
        }
    }
    if changedUsers != nil {
        what := "channel"
        if accessMap == &doc.RoleAccess {
            what = "role"
        }
        base.LogTo("Access", "Doc %q grants %s access: %v", doc.ID, what, *accessMap)
    }
    return changedUsers
}
Developer: guoyu07, Project: sync_gateway, Lines: 31, Source: document.go
Note: The github.com/couchbase/sync_gateway/base.LogTo examples in this article were collected from source-code and documentation platforms such as GitHub/MSDocs. The code snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.