This article collects typical usage examples of the Golang QuerySpec class from github.com/influxdb/influxdb/parser. If you have been wondering what the QuerySpec class does, how to use it, or what real-world usage looks like, the hand-picked class examples below should help.
A total of 20 QuerySpec code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
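Before diving into the examples, here is a minimal sketch of how a QuerySpec is typically constructed in this (0.8-era) InfluxDB codebase. It assumes that parser.ParseQuery and parser.NewQuerySpec exist with the signatures shown and that a common.User comes from the request context; treat it as an illustrative sketch of the pattern the coordinator uses, not as authoritative API documentation.

package main

import (
    "fmt"
    "log"

    "github.com/influxdb/influxdb/common"
    "github.com/influxdb/influxdb/parser"
)

// buildQuerySpecs parses a raw query string and wraps each resulting query in a
// QuerySpec, roughly the way the coordinator does before dispatching to shards.
// The exact ParseQuery/NewQuerySpec signatures are assumptions based on the
// 0.8-era code; check your checkout of the parser package.
func buildQuerySpecs(user common.User, database, queryString string) ([]*parser.QuerySpec, error) {
    queries, err := parser.ParseQuery(queryString)
    if err != nil {
        return nil, err
    }
    specs := make([]*parser.QuerySpec, 0, len(queries))
    for _, q := range queries {
        spec := parser.NewQuerySpec(user, database, q)
        // QuerySpec exposes the accessors used throughout the examples below,
        // e.g. Database(), SelectQuery(), IsSinglePointQuery(), IsAscending().
        fmt.Println("spec for database:", spec.Database())
        specs = append(specs, spec)
    }
    return specs, nil
}

func main() {
    // A nil user is for illustration only; real callers pass an authenticated user.
    specs, err := buildQuerySpecs(nil, "mydb", "select value from response_times where time > now() - 1h")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("built", len(specs), "query spec(s)")
}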
Example 1: getShardsAndProcessor
func (self *Coordinator) getShardsAndProcessor(querySpec *parser.QuerySpec, writer engine.Processor) ([]*cluster.ShardData, engine.Processor, error) {
    shards, err := self.clusterConfiguration.GetShardsForQuery(querySpec)
    if err != nil {
        return nil, nil, err
    }
    shouldAggregateLocally := shards.ShouldAggregateLocally(querySpec)
    q := querySpec.SelectQuery()
    if q == nil {
        return shards, writer, nil
    }
    if !shouldAggregateLocally {
        // if we should aggregate in the coordinator (i.e. aggregation
        // isn't happening locally at the shard level), create an engine
        shardIds := make([]uint32, len(shards))
        for i, s := range shards {
            shardIds[i] = s.Id()
        }
        writer, err = engine.NewQueryEngine(writer, q, shardIds)
        if err != nil {
            log.Error(err)
            log.Debug("Coordinator processor chain: %s", engine.ProcessorChain(writer))
        }
        return shards, writer, err
    }
    // if we have a query with limit, then create an engine, or we can
    // make the passthrough limit aware
    writer = engine.NewPassthroughEngineWithLimit(writer, 100, q.Limit)
    return shards, writer, nil
}
Developer ID: carriercomm, Project: facette, Lines of code: 32, Source file: coordinator.go
Example 2: QueryResponseBufferSize
func (self *ShardData) QueryResponseBufferSize(querySpec *parser.QuerySpec, batchPointSize int) int {
    groupByTime := querySpec.GetGroupByInterval()
    if groupByTime == nil {
        // If the group by time is nil, we shouldn't have to use a buffer since the shards should be queried sequentially.
        // However, set this to something high just to be safe.
        log.Debug("BUFFER SIZE: 1000")
        return 1000
    }
    tickCount := int(self.shardNanoseconds / uint64(*groupByTime))
    if tickCount < 10 {
        tickCount = 100
    } else if tickCount > 1000 {
        // cap this because each response should have up to this number of points in it.
        tickCount = tickCount / batchPointSize
        // but make sure it's at least 1k
        if tickCount < 1000 {
            tickCount = 1000
        }
    }
    columnCount := querySpec.GetGroupByColumnCount()
    if columnCount > 1 {
        // we don't really know the cardinality for any column up front. This is just a multiplier, so we'll see how this goes.
        // each response can have many points, so having a buffer of the ticks * 100 should be safe, but we'll see.
        tickCount = tickCount * 100
    }
    log.Debug("BUFFER SIZE: %d", tickCount)
    return tickCount
}
Developer ID: vovkasm, Project: facette, Lines of code: 30, Source file: shard.go
Example 3: runListSeriesQuery
func (self *Coordinator) runListSeriesQuery(querySpec *parser.QuerySpec, p engine.Processor) error {
    allSeries := self.clusterConfiguration.MetaStore.GetSeriesForDatabase(querySpec.Database())
    matchingSeries := allSeries
    if q := querySpec.Query().GetListSeriesQuery(); q.HasRegex() {
        matchingSeries = nil
        regex := q.GetRegex()
        for _, s := range allSeries {
            if !regex.MatchString(s) {
                continue
            }
            matchingSeries = append(matchingSeries, s)
        }
    }
    name := "list_series_result"
    fields := []string{"name"}
    points := make([]*protocol.Point, len(matchingSeries), len(matchingSeries))
    for i, s := range matchingSeries {
        fieldValues := []*protocol.FieldValue{{StringValue: proto.String(s)}}
        points[i] = &protocol.Point{Values: fieldValues}
    }
    seriesResult := &protocol.Series{Name: &name, Fields: fields, Points: points}
    _, err := p.Yield(seriesResult)
    return err
}
Developer ID: ostark, Project: influxdb, Lines of code: 26, Source file: coordinator.go
Example 4: getShardsToMatchQuery
func (self *ClusterConfiguration) getShardsToMatchQuery(querySpec *parser.QuerySpec) ([]*ShardData, error) {
    self.shardLock.RLock()
    defer self.shardLock.RUnlock()
    seriesNames, fromRegex := querySpec.TableNamesAndRegex()
    db := querySpec.Database()
    if fromRegex != nil {
        seriesNames = self.MetaStore.GetSeriesForDatabaseAndRegex(db, fromRegex)
    }
    uniqueShards := make(map[uint32]*ShardData)
    for _, name := range seriesNames {
        if fs := self.MetaStore.GetFieldsForSeries(db, name); len(fs) == 0 {
            return nil, fmt.Errorf("Couldn't find series: %s", name)
        }
        space := self.getShardSpaceToMatchSeriesName(db, name)
        if space == nil {
            continue
        }
        for _, shard := range space.shards {
            uniqueShards[shard.id] = shard
        }
    }
    shards := make([]*ShardData, 0, len(uniqueShards))
    for _, shard := range uniqueShards {
        shards = append(shards, shard)
    }
    SortShardsByTimeDescending(shards)
    return shards, nil
}
Developer ID: ericcapricorn, Project: influxdb, Lines of code: 28, Source file: cluster_configuration.go
Example 5: executeListSeriesQuery
func (self *LevelDbShard) executeListSeriesQuery(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
    it := self.db.NewIterator(self.readOptions)
    defer it.Close()
    database := querySpec.Database()
    seekKey := append(DATABASE_SERIES_INDEX_PREFIX, []byte(querySpec.Database()+"~")...)
    it.Seek(seekKey)
    dbNameStart := len(DATABASE_SERIES_INDEX_PREFIX)
    for it = it; it.Valid(); it.Next() {
        key := it.Key()
        if len(key) < dbNameStart || !bytes.Equal(key[:dbNameStart], DATABASE_SERIES_INDEX_PREFIX) {
            break
        }
        dbSeries := string(key[dbNameStart:])
        parts := strings.Split(dbSeries, "~")
        if len(parts) > 1 {
            if parts[0] != database {
                break
            }
            name := parts[1]
            shouldContinue := processor.YieldPoint(&name, nil, nil)
            if !shouldContinue {
                return nil
            }
        }
    }
    return nil
}
Developer ID: Kenterfie, Project: influxdb, Lines of code: 28, Source file: level_db_shard.go
Example 6: Query
func (self *Shard) Query(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
    if querySpec.IsListSeriesQuery() {
        return self.executeListSeriesQuery(querySpec, processor)
    } else if querySpec.IsDeleteFromSeriesQuery() {
        return self.executeDeleteQuery(querySpec, processor)
    }
    seriesAndColumns := querySpec.SelectQuery().GetReferencedColumns()
    if !self.hasReadAccess(querySpec) {
        return errors.New("User does not have access to one or more of the series requested.")
    }
    for series, columns := range seriesAndColumns {
        if regex, ok := series.GetCompiledRegex(); ok {
            seriesNames := self.metaStore.GetSeriesForDatabaseAndRegex(querySpec.Database(), regex)
            for _, name := range seriesNames {
                if !querySpec.HasReadAccess(name) {
                    continue
                }
                err := self.executeQueryForSeries(querySpec, name, columns, processor)
                if err != nil {
                    return err
                }
            }
        } else {
            err := self.executeQueryForSeries(querySpec, series.Name, columns, processor)
            if err != nil {
                return err
            }
        }
    }
    return nil
}
Developer ID: WIZARD-CXY, Project: golang-devops-stuff, Lines of code: 34, Source file: shard.go
Example 7: GetShardsForQuery
func (self *ClusterConfiguration) GetShardsForQuery(querySpec *parser.QuerySpec) Shards {
    shards := self.getShardsToMatchQuery(querySpec)
    shards = self.getShardRange(querySpec, shards)
    if querySpec.IsAscending() {
        SortShardsByTimeAscending(shards)
    }
    return shards
}
Developer ID: aiyi, Project: influxdb, Lines of code: 8, Source file: cluster_configuration.go
Example 8: runDeleteQuery
func (self *CoordinatorImpl) runDeleteQuery(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
    user := querySpec.User()
    db := querySpec.Database()
    if ok, err := self.permissions.AuthorizeDeleteQuery(user, db); !ok {
        return err
    }
    querySpec.RunAgainstAllServersInShard = true
    return self.runQuerySpec(querySpec, seriesWriter)
}
Developer ID: Kenterfie, Project: influxdb, Lines of code: 9, Source file: coordinator.go
Example 9: runQuerySpec
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
    shards, processor, seriesClosed, err := self.getShardsAndProcessor(querySpec, seriesWriter)
    if err != nil {
        return err
    }
    if len(shards) == 0 {
        return fmt.Errorf("Couldn't look up columns")
    }
    defer func() {
        if processor != nil {
            processor.Close()
            <-seriesClosed
        } else {
            seriesWriter.Close()
        }
    }()
    shardConcurrentLimit := self.config.ConcurrentShardQueryLimit
    if self.shouldQuerySequentially(shards, querySpec) {
        log.Debug("Querying shards sequentially")
        shardConcurrentLimit = 1
    }
    log.Debug("Shard concurrent limit: %d", shardConcurrentLimit)
    errors := make(chan error, shardConcurrentLimit)
    for i := 0; i < shardConcurrentLimit; i++ {
        errors <- nil
    }
    responseChannels := make(chan (<-chan *protocol.Response), shardConcurrentLimit)
    go self.readFromResponseChannels(processor, seriesWriter, querySpec.IsExplainQuery(), errors, responseChannels)
    err = self.queryShards(querySpec, shards, errors, responseChannels)
    // make sure we read the rest of the errors and responses
    for _err := range errors {
        if err == nil {
            err = _err
        }
    }
    for responsechan := range responseChannels {
        for response := range responsechan {
            if response.GetType() != endStreamResponse {
                continue
            }
            if response.ErrorMessage != nil && err == nil {
                err = common.NewQueryError(common.InvalidArgument, *response.ErrorMessage)
            }
            break
        }
    }
    return err
}
Developer ID: Kenterfie, Project: influxdb, Lines of code: 56, Source file: coordinator.go
Example 10: CheckQueryPermissions
func (self *Permissions) CheckQueryPermissions(user common.User, db string, querySpec *parser.QuerySpec) (ok bool, err common.AuthorizationError) {
    switch querySpec.Query().Type() {
    case parser.Delete:
        return self.AuthorizeDeleteQuery(user, db)
    case parser.Select:
        return self.AuthorizeSelectQuery(user, db, querySpec)
    default:
        return true, ""
    }
}
Developer ID: carriercomm, Project: facette, Lines of code: 10, Source file: permissions.go
Example 11: hasReadAccess
func (self *Shard) hasReadAccess(querySpec *parser.QuerySpec) bool {
    for series := range querySpec.SeriesValuesAndColumns() {
        if _, isRegex := series.GetCompiledRegex(); !isRegex {
            if !querySpec.HasReadAccess(series.Name) {
                return false
            }
        }
    }
    return true
}
Developer ID: ericcapricorn, Project: influxdb, Lines of code: 10, Source file: shard.go
Example 12: executeDropSeriesQuery
func (self *LevelDbShard) executeDropSeriesQuery(querySpec *parser.QuerySpec, processor cluster.QueryProcessor) error {
    database := querySpec.Database()
    series := querySpec.Query().DropSeriesQuery.GetTableName()
    err := self.dropSeries(database, series)
    if err != nil {
        return err
    }
    self.compact()
    return nil
}
Developer ID: Kenterfie, Project: influxdb, Lines of code: 10, Source file: level_db_shard.go
Example 13: expandRegex
func (self *Coordinator) expandRegex(spec *parser.QuerySpec) {
    q := spec.SelectQuery()
    if q == nil {
        return
    }
    f := func(r *regexp.Regexp) []string {
        return self.clusterConfiguration.MetaStore.GetSeriesForDatabaseAndRegex(spec.Database(), r)
    }
    parser.RewriteMergeQuery(q, f)
}
Developer ID: carriercomm, Project: facette, Lines of code: 12, Source file: coordinator.go
Example 14: GetShardsForQuery
func (self *ClusterConfiguration) GetShardsForQuery(querySpec *parser.QuerySpec) (Shards, error) {
    shards, err := self.getShardsToMatchQuery(querySpec)
    if err != nil {
        return nil, err
    }
    log.Debug("Querying %d shards for query", len(shards))
    shards = self.getShardRange(querySpec, shards)
    if querySpec.IsAscending() {
        SortShardsByTimeAscending(shards)
    }
    return shards, nil
}
Developer ID: ericcapricorn, Project: influxdb, Lines of code: 12, Source file: cluster_configuration.go
Example 15: executeQueryForSeries
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, name string, columns []string, processor engine.Processor) error {
    if querySpec.IsSinglePointQuery() {
        log.Debug("Running single query for series %s", name)
        return self.executeSinglePointQuery(querySpec, name, columns, processor)
    }
    var pi *PointIterator
    var err error
    columns, pi, err = self.getPointIteratorForSeries(querySpec, name, columns)
    if err != nil {
        return err
    }
    defer pi.Close()
    query := querySpec.SelectQuery()
    aliases := query.GetTableAliases(name)
    seriesOutgoing := &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
    for pi.Valid() {
        p := pi.Point()
        seriesOutgoing.Points = append(seriesOutgoing.Points, p)
        if len(seriesOutgoing.Points) >= self.pointBatchSize {
            ok, err := yieldToProcessor(seriesOutgoing, processor, aliases)
            if !ok || err != nil {
                log.Debug("Stopping processing.")
                if err != nil {
                    log.Error("Error while processing data: %v", err)
                    return err
                }
                return nil
            }
            seriesOutgoing = &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
        }
        pi.Next()
    }
    if err := pi.Error(); err != nil {
        return err
    }
    // Yield remaining data
    if ok, err := yieldToProcessor(seriesOutgoing, processor, aliases); !ok || err != nil {
        log.Debug("Stopping processing remaining points...")
        if err != nil {
            log.Error("Error while processing data: %v", err)
            return err
        }
    }
    log.Debug("Finished running query %s", query.GetQueryString())
    return nil
}
Developer ID: ericcapricorn, Project: influxdb, Lines of code: 52, Source file: shard.go
Example 16: AuthorizeSelectQuery
func (self *Permissions) AuthorizeSelectQuery(user common.User, db string, querySpec *parser.QuerySpec) (ok bool, err common.AuthorizationError) {
    // if this isn't a regex query do the permission check here
    fromClause := querySpec.SelectQuery().GetFromClause()
    for _, n := range fromClause.Names {
        if _, ok := n.Name.GetCompiledRegex(); ok {
            break
        } else if name := n.Name.Name; !user.HasReadAccess(name) {
            return false, common.NewAuthorizationError("User doesn't have read access to %s", name)
        }
    }
    return true, ""
}
Developer ID: carriercomm, Project: facette, Lines of code: 13, Source file: permissions.go
Example 17: checkPermission
func (self *CoordinatorImpl) checkPermission(user common.User, querySpec *parser.QuerySpec) error {
    // if this isn't a regex query do the permission check here
    fromClause := querySpec.SelectQuery().GetFromClause()
    for _, n := range fromClause.Names {
        if _, ok := n.Name.GetCompiledRegex(); ok {
            break
        } else if name := n.Name.Name; !user.HasReadAccess(name) {
            return fmt.Errorf("User doesn't have read access to %s", name)
        }
    }
    return nil
}
Developer ID: Kenterfie, Project: influxdb, Lines of code: 13, Source file: coordinator.go
Example 18: executeSinglePointQuery
func (self *Shard) executeSinglePointQuery(querySpec *parser.QuerySpec, name string, columns []string, p engine.Processor) error {
    fields, err := self.getFieldsForSeries(querySpec.Database(), name, columns)
    if err != nil {
        log.Error("Error looking up fields for %s: %s", name, err)
        return err
    }
    query := querySpec.SelectQuery()
    fieldCount := len(fields)
    fieldNames := make([]string, 0, fieldCount)
    point := &protocol.Point{Values: make([]*protocol.FieldValue, 0, fieldCount)}
    timestamp := common.TimeToMicroseconds(query.GetStartTime())
    sequenceNumber, err := query.GetSinglePointQuerySequenceNumber()
    if err != nil {
        return err
    }
    // set the timestamp and sequence number
    point.SequenceNumber = &sequenceNumber
    point.SetTimestampInMicroseconds(timestamp)
    for _, field := range fields {
        sk := newStorageKey(field.Id, timestamp, sequenceNumber)
        data, err := self.db.Get(sk.bytes())
        if err != nil {
            return err
        }
        if data == nil {
            continue
        }
        fieldValue := &protocol.FieldValue{}
        err = proto.Unmarshal(data, fieldValue)
        if err != nil {
            return err
        }
        fieldNames = append(fieldNames, field.Name)
        point.Values = append(point.Values, fieldValue)
    }
    result := &protocol.Series{Name: &name, Fields: fieldNames, Points: []*protocol.Point{point}}
    if len(result.Points) > 0 {
        _, err := p.Yield(result)
        return err
    }
    return nil
}
Developer ID: ericcapricorn, Project: influxdb, Lines of code: 49, Source file: shard.go
Example 19: getProcessor
func (self *ShardData) getProcessor(querySpec *parser.QuerySpec, processor engine.Processor) (engine.Processor, error) {
    switch qt := querySpec.Query().Type(); qt {
    case parser.Delete, parser.DropSeries:
        return NilProcessor{}, nil
    case parser.Select:
        // continue
    default:
        panic(fmt.Errorf("Unexpected query type: %s", qt))
    }
    if querySpec.IsSinglePointQuery() {
        return engine.NewPassthroughEngine(processor, 1), nil
    }
    query := querySpec.SelectQuery()
    var err error
    // We should aggregate at the shard level
    if self.ShouldAggregateLocally(querySpec) {
        log.Debug("creating a query engine")
        processor, err = engine.NewQueryEngine(processor, query, nil)
        if err != nil {
            return nil, err
        }
        goto addFilter
    }
    // we shouldn't limit the queries if they have aggregates and aren't
    // aggregated locally, otherwise the aggregation which happens
    // in the coordinator will get partial data and will be incorrect
    if query.HasAggregates() {
        log.Debug("creating a passthrough engine")
        processor = engine.NewPassthroughEngine(processor, 1000)
        goto addFilter
    }
    // This is an optimization so we don't send more data than we should
    // over the wire. The coordinator has its own Passthrough which does
    // the final limit.
    if l := query.Limit; l > 0 {
        log.Debug("creating a passthrough engine with limit")
        processor = engine.NewPassthroughEngineWithLimit(processor, 1000, query.Limit)
    }
addFilter:
    if query := querySpec.SelectQuery(); query != nil && query.GetFromClause().Type != parser.FromClauseInnerJoin {
        // Joins do their own filtering since we need to get all
        // points before filtering. This is due to the fact that some
        // where expressions will be difficult to compute before the
        // points are joined together, think where clause with
        // left.column = 'something' or right.column =
        // 'something_else'. We can't filter the individual series
        // separately. The filtering happens in merge.go:55
        processor = engine.NewFilteringEngine(query, processor)
    }
    return processor, nil
}
Developer ID: vovkasm, Project: facette, Lines of code: 58, Source file: shard.go
Example 20: runListSeriesQuery
func (self *CoordinatorImpl) runListSeriesQuery(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
    series := self.clusterConfiguration.MetaStore.GetSeriesForDatabase(querySpec.Database())
    name := "list_series_result"
    fields := []string{"name"}
    points := make([]*protocol.Point, len(series), len(series))
    for i, s := range series {
        fieldValues := []*protocol.FieldValue{{StringValue: proto.String(s)}}
        points[i] = &protocol.Point{Values: fieldValues}
    }
    seriesResult := &protocol.Series{Name: &name, Fields: fields, Points: points}
    seriesWriter.Write(seriesResult)
    seriesWriter.Close()
    return nil
}
Developer ID: WIZARD-CXY, Project: golang-devops-stuff, Lines of code: 16, Source file: coordinator.go
Note: The github.com/influxdb/influxdb/parser.QuerySpec class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors, and copyright for the source code remains with the original authors. Please consult each project's License before using or redistributing the code; do not repost without permission.