This file is indexed.

/usr/share/gocode/src/github.com/influxdata/influxdb/services/continuous_querier/service.go is in golang-github-influxdb-influxdb-dev 1.1.1+dfsg1-4.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

package continuous_querier // import "github.com/influxdata/influxdb/services/continuous_querier"

import (
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/services/meta"
)

const (
	// NoChunkingSize specifies when not to chunk results. When planning
	// a select statement, passing zero tells it not to chunk results.
	// Only applies to raw queries.
	NoChunkingSize = 0

	// idDelimiter is used as a delimiter when creating a unique name for a
	// Continuous Query.
	idDelimiter = string(rune(31)) // unit separator
)

// Statistics for the CQ service.
const (
	statQueryOK   = "queryOk"
	statQueryFail = "queryFail"
)

// ContinuousQuerier represents a service that executes continuous queries.
type ContinuousQuerier interface {
	// Run executes the named query in the named database.  Blank database or name matches all.
	Run(database, name string, t time.Time) error
}

// metaClient is an internal interface to make testing easier.
type metaClient interface {
	AcquireLease(name string) (l *meta.Lease, err error)
	Databases() []meta.DatabaseInfo
	Database(name string) *meta.DatabaseInfo
}

// RunRequest is a request to run one or more CQs.
type RunRequest struct {
	// Now tells the CQ service what the current time is.
	Now time.Time
	// CQs tells the CQ service which queries to run.
	// If nil, all queries will be run.
	CQs []string
}

// matches returns true if the CQ matches one of the requested CQs.
func (rr *RunRequest) matches(cq *meta.ContinuousQueryInfo) bool {
	if rr.CQs == nil {
		return true
	}
	for _, q := range rr.CQs {
		if q == cq.Name {
			return true
		}
	}
	return false
}

// Service manages continuous query execution.
type Service struct {
	MetaClient    metaClient
	QueryExecutor *influxql.QueryExecutor
	Config        *Config
	RunInterval   time.Duration
	// RunCh can be used by clients to signal service to run CQs.
	RunCh          chan *RunRequest
	Logger         *log.Logger
	loggingEnabled bool
	stats          *Statistics
	// lastRuns maps CQ name to last time it was run.
	mu       sync.RWMutex
	lastRuns map[string]time.Time
	stop     chan struct{}
	wg       *sync.WaitGroup
}

// NewService returns a new instance of Service.
func NewService(c Config) *Service {
	s := &Service{
		Config:         &c,
		RunInterval:    time.Duration(c.RunInterval),
		RunCh:          make(chan *RunRequest),
		loggingEnabled: c.LogEnabled,
		Logger:         log.New(os.Stderr, "[continuous_querier] ", log.LstdFlags),
		stats:          &Statistics{},
		lastRuns:       map[string]time.Time{},
	}

	return s
}

// Open starts the service.
func (s *Service) Open() error {
	s.Logger.Println("Starting continuous query service")

	if s.stop != nil {
		return nil
	}

	assert(s.MetaClient != nil, "MetaClient is nil")
	assert(s.QueryExecutor != nil, "QueryExecutor is nil")

	s.stop = make(chan struct{})
	s.wg = &sync.WaitGroup{}
	s.wg.Add(1)
	go s.backgroundLoop()
	return nil
}

// Close stops the service.
func (s *Service) Close() error {
	if s.stop == nil {
		return nil
	}
	close(s.stop)
	s.wg.Wait()
	s.wg = nil
	s.stop = nil
	return nil
}

// SetLogOutput sets the writer to which all logs are written. It must not be
// called after Open is called.
func (s *Service) SetLogOutput(w io.Writer) {
	s.Logger = log.New(w, "[continuous_querier] ", log.LstdFlags)
}

// Statistics maintains the statistics for the continuous query service.
type Statistics struct {
	QueryOK   int64
	QueryFail int64
}

// Statistics returns statistics for periodic monitoring.
func (s *Service) Statistics(tags map[string]string) []models.Statistic {
	return []models.Statistic{{
		Name: "cq",
		Tags: tags,
		Values: map[string]interface{}{
			statQueryOK:   atomic.LoadInt64(&s.stats.QueryOK),
			statQueryFail: atomic.LoadInt64(&s.stats.QueryFail),
		},
	}}
}

// Run runs the specified continuous query, or all CQs if none is specified.
func (s *Service) Run(database, name string, t time.Time) error {
	var dbs []meta.DatabaseInfo

	if database != "" {
		// Find the requested database.
		db := s.MetaClient.Database(database)
		if db == nil {
			return influxql.ErrDatabaseNotFound(database)
		}
		dbs = append(dbs, *db)
	} else {
		// Get all databases.
		dbs = s.MetaClient.Databases()
	}

	// Loop through databases.
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, db := range dbs {
		// Loop through CQs in each DB executing the ones that match name.
		for _, cq := range db.ContinuousQueries {
			if name == "" || cq.Name == name {
				// Remove the last run time for the CQ
				id := fmt.Sprintf("%s%s%s", db.Name, idDelimiter, cq.Name)
				if _, ok := s.lastRuns[id]; ok {
					delete(s.lastRuns, id)
				}
			}
		}
	}

	// Signal the background routine to run CQs.
	s.RunCh <- &RunRequest{Now: t}

	return nil
}

// backgroundLoop runs in a goroutine and periodically executes CQs.
func (s *Service) backgroundLoop() {
	leaseName := "continuous_querier"
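	// Acquiring the lease below is intended to keep CQs from running on more
	// than one node at a time: only the node that successfully acquires the
	// "continuous_querier" lease goes on to execute the queries for a given
	// interval.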
	defer s.wg.Done()
	for {
		select {
		case <-s.stop:
			s.Logger.Println("continuous query service terminating")
			return
		case req := <-s.RunCh:
			if !s.hasContinuousQueries() {
				continue
			}
			if _, err := s.MetaClient.AcquireLease(leaseName); err == nil {
				s.Logger.Printf("running continuous queries by request for time: %v", req.Now)
				s.runContinuousQueries(req)
			}
		case <-time.After(s.RunInterval):
			if !s.hasContinuousQueries() {
				continue
			}
			if _, err := s.MetaClient.AcquireLease(leaseName); err == nil {
				s.runContinuousQueries(&RunRequest{Now: time.Now()})
			}
		}
	}
}

// hasContinuousQueries returns true if any CQs exist.
func (s *Service) hasContinuousQueries() bool {
	// Get list of all databases.
	dbs := s.MetaClient.Databases()
	// Loop through all databases executing CQs.
	for _, db := range dbs {
		if len(db.ContinuousQueries) > 0 {
			return true
		}
	}
	return false
}

// runContinuousQueries gets CQs from the meta store and runs them.
func (s *Service) runContinuousQueries(req *RunRequest) {
	// Get list of all databases.
	dbs := s.MetaClient.Databases()
	// Loop through all databases executing CQs.
	for _, db := range dbs {
		// TODO: distribute across nodes
		for _, cq := range db.ContinuousQueries {
			if !req.matches(&cq) {
				continue
			}
			if err := s.ExecuteContinuousQuery(&db, &cq, req.Now); err != nil {
				s.Logger.Printf("error executing query: %s: err = %s", cq.Query, err)
				atomic.AddInt64(&s.stats.QueryFail, 1)
			} else {
				atomic.AddInt64(&s.stats.QueryOK, 1)
			}
		}
	}
}

// ExecuteContinuousQuery executes a single CQ.
func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo, now time.Time) error {
	// TODO: re-enable stats
	//s.stats.Inc("continuousQueryExecuted")

	// Local wrapper / helper.
	cq, err := NewContinuousQuery(dbi.Name, cqi)
	if err != nil {
		return err
	}

	// Get the last time this CQ was run from the service's cache.
	s.mu.Lock()
	defer s.mu.Unlock()
	id := fmt.Sprintf("%s%s%s", dbi.Name, idDelimiter, cqi.Name)
	cq.LastRun, cq.HasRun = s.lastRuns[id]

	// Set the retention policy to default if it wasn't specified in the query.
	if cq.intoRP() == "" {
		cq.setIntoRP(dbi.DefaultRetentionPolicy)
	}

	// See if this query needs to be run.
	run, nextRun, err := cq.shouldRunContinuousQuery(now)
	if err != nil {
		return err
	} else if !run {
		return nil
	}

	// Get the group by interval.
	interval, err := cq.q.GroupByInterval()
	if err != nil {
		return err
	} else if interval == 0 {
		return nil
	}

	// Get the group by offset.
	offset, err := cq.q.GroupByOffset()
	if err != nil {
		return err
	}

	resampleEvery := interval
	if cq.Resample.Every != 0 {
		resampleEvery = cq.Resample.Every
	}

	// We're about to run the query, so record the current time truncated to the
	// resample interval (adjusted for any offset). If all goes well, this time
	// should be the same as nextRun.
	cq.LastRun = now.Add(-offset).Truncate(resampleEvery).Add(offset)
	s.lastRuns[id] = cq.LastRun

	// Retrieve the oldest interval we should calculate based on the next time
	// interval. We do this instead of using the current time just in case any
	// time intervals were missed. The start time of the oldest interval is what
	// we use as the start time.
	resampleFor := interval
	if cq.Resample.For != 0 {
		resampleFor = cq.Resample.For
	} else if interval < resampleEvery {
		resampleFor = resampleEvery
	}

	// If the resample interval is greater than the interval of the query, use the
	// query interval instead.
	if interval < resampleEvery {
		resampleEvery = interval
	}

	// Calculate and set the time range for the query.
	startTime := nextRun.Add(interval - resampleFor - offset - 1).Truncate(interval).Add(offset)
	endTime := now.Add(interval - resampleEvery - offset).Truncate(interval).Add(offset)
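	// Illustrative example (values assumed, not taken from any particular CQ):
	// for GROUP BY time(10m) with no RESAMPLE clause and no offset, interval,
	// resampleEvery and resampleFor are all 10m. If nextRun and now are both
	// 08:00:00, startTime truncates (08:00:00 - 1ns) down to 07:50:00 and
	// endTime truncates 08:00:00 to 08:00:00, so the CQ recomputes only the
	// most recently completed 10-minute bucket.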
	if !endTime.After(startTime) {
		// Exit early since there is no time interval.
		return nil
	}

	if err := cq.q.SetTimeRange(startTime, endTime); err != nil {
		s.Logger.Printf("error setting time range: %s\n", err)
		return err
	}

	var start time.Time
	if s.loggingEnabled {
		s.Logger.Printf("executing continuous query %s (%v to %v)", cq.Info.Name, startTime, endTime)
		start = time.Now()
	}

	// Do the actual processing of the query & writing of results.
	if err := s.runContinuousQueryAndWriteResult(cq); err != nil {
		s.Logger.Printf("error: %s. running: %s\n", err, cq.q.String())
		return err
	}

	if s.loggingEnabled {
		s.Logger.Printf("finished continuous query %s (%v to %v) in %s", cq.Info.Name, startTime, endTime, time.Now().Sub(start))
	}
	return nil
}

// runContinuousQueryAndWriteResult will run the query against the cluster and write the results back in
func (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {
	// Wrap the CQ's inner SELECT statement in a Query for the QueryExecutor.
	q := &influxql.Query{
		Statements: influxql.Statements([]influxql.Statement{cq.q}),
	}

	closing := make(chan struct{})
	defer close(closing)

	// Execute the SELECT.
	ch := s.QueryExecutor.ExecuteQuery(q, influxql.ExecutionOptions{
		Database: cq.Database,
	}, closing)

	// There is only one statement, so we will only ever receive one result
	res, ok := <-ch
	if !ok {
		panic("result channel was closed")
	}
	if res.Err != nil {
		return res.Err
	}
	return nil
}

// ContinuousQuery is a local wrapper / helper around continuous queries.
type ContinuousQuery struct {
	Database string
	Info     *meta.ContinuousQueryInfo
	HasRun   bool
	LastRun  time.Time
	Resample ResampleOptions
	q        *influxql.SelectStatement
}

func (cq *ContinuousQuery) intoRP() string      { return cq.q.Target.Measurement.RetentionPolicy }
func (cq *ContinuousQuery) setIntoRP(rp string) { cq.q.Target.Measurement.RetentionPolicy = rp }

// ResampleOptions customizes the resampling intervals and duration of this continuous query.
type ResampleOptions struct {
	// The query will be resampled at this time interval. The first query will be
	// performed at this time interval. If this option is not given, the resample
	// interval is set to the group by interval.
	Every time.Duration

	// The query will continue being resampled for this time duration. If this
	// option is not given, the resample duration is the same as the group by
	// interval. A bucket's time is calculated based on the bucket's start time,
	// so a 40m resample duration with a group by interval of 10m will resample
	// the bucket 4 times (using the default time interval).
	For time.Duration
}

// NewContinuousQuery returns a ContinuousQuery object with a parsed influxql.CreateContinuousQueryStatement
func NewContinuousQuery(database string, cqi *meta.ContinuousQueryInfo) (*ContinuousQuery, error) {
	stmt, err := influxql.NewParser(strings.NewReader(cqi.Query)).ParseStatement()
	if err != nil {
		return nil, err
	}

	q, ok := stmt.(*influxql.CreateContinuousQueryStatement)
	if !ok || q.Source.Target == nil || q.Source.Target.Measurement == nil {
		return nil, errors.New("query isn't a valid continuous query")
	}

	cquery := &ContinuousQuery{
		Database: database,
		Info:     cqi,
		Resample: ResampleOptions{
			Every: q.ResampleEvery,
			For:   q.ResampleFor,
		},
		q: q.Source,
	}

	return cquery, nil
}

// shouldRunContinuousQuery returns true if the CQ should be scheduled to run. It uses the
// CQ's last run time and the run rules set through the query to determine
// whether this CQ should be run now.
func (cq *ContinuousQuery) shouldRunContinuousQuery(now time.Time) (bool, time.Time, error) {
	// if it's not aggregated we don't run it
	if cq.q.IsRawQuery {
		return false, cq.LastRun, errors.New("continuous queries must be aggregate queries")
	}

	// since it's aggregated we need to figure how often it should be run
	interval, err := cq.q.GroupByInterval()
	if err != nil {
		return false, cq.LastRun, err
	}

	// allow the interval to be overwritten by the query's resample options
	resampleEvery := interval
	if cq.Resample.Every != 0 {
		resampleEvery = cq.Resample.Every
	}
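	// For example, a CQ declared with GROUP BY time(5m) RESAMPLE EVERY 30m
	// only becomes eligible to run every 30 minutes, even though each run
	// still aggregates 5-minute buckets.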

	// if we've passed the amount of time since the last run, or there was no last run, do it up
	if cq.HasRun {
		nextRun := cq.LastRun.Add(resampleEvery)
		if nextRun.UnixNano() <= now.UnixNano() {
			return true, nextRun, nil
		}
	} else {
		return true, now, nil
	}

	return false, cq.LastRun, nil
}

// assert will panic with a given formatted message if the given condition is false.
func assert(condition bool, msg string, v ...interface{}) {
	if !condition {
		panic(fmt.Sprintf("assert failed: "+msg, v...))
	}
}
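
For context, below is a minimal sketch of how this service is typically wired up and driven from outside the package. The field and method names come from the file above; the surrounding setup (the server's *meta.Client and *influxql.QueryExecutor) is assumed, and the database name "telegraf" and CQ name "cq_15m" are hypothetical.

package main

import (
	"log"
	"time"

	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/influxdb/services/continuous_querier"
	"github.com/influxdata/influxdb/services/meta"
)

// startCQService shows the wiring; metaClient and queryExecutor would come
// from the running server.
func startCQService(metaClient *meta.Client, queryExecutor *influxql.QueryExecutor) (*continuous_querier.Service, error) {
	svc := continuous_querier.NewService(continuous_querier.NewConfig())
	svc.MetaClient = metaClient
	svc.QueryExecutor = queryExecutor
	if err := svc.Open(); err != nil {
		return nil, err
	}

	// Optionally ask the service to run the CQ "cq_15m" in database "telegraf"
	// now, instead of waiting for the next RunInterval tick.
	if err := svc.Run("telegraf", "cq_15m", time.Now()); err != nil {
		log.Println(err)
	}
	return svc, nil
}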