This file is indexed.

/usr/share/gocode/src/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go is in golang-github-influxdb-influxdb-dev 1.1.1+dfsg1-4.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
package main

import (
	"fmt"
	"log"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
	"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
)

// tracker will orchestrate and track the conversions of non-TSM shards to TSM
type tracker struct {
	Stats stats.Stats // conversion counters; updated atomically by worker goroutines

	shards tsdb.ShardInfos // the shards to be converted
	opts   options         // command-line options controlling the conversion

	pg ParallelGroup  // bounds the number of concurrent backup/conversion workers
	wg sync.WaitGroup // tracks outstanding backup/conversion goroutines
}

// newTracker returns a tracker ready to convert the given shards with the
// supplied options, using a worker pool sized to the number of available CPUs.
func newTracker(shards tsdb.ShardInfos, opts options) *tracker {
	return &tracker{
		shards: shards,
		opts:   opts,
		pg:     NewParallelGroup(runtime.GOMAXPROCS(0)),
	}
}

// Run performs the conversion: it optionally backs up each database, then
// converts every shard in parallel while printing periodic status updates
// until all work completes. It records the total elapsed time in t.Stats.
// Backup or conversion failures terminate the process via log.Fatalf.
// It always returns nil on normal completion.
func (t *tracker) Run() error {
	conversionStart := time.Now()

	// Back up each database unless the user opted out. Note we consistently
	// use t.opts (the options this tracker was constructed with) rather than
	// the package-level opts.
	if !t.opts.SkipBackup {
		databases := t.shards.Databases()
		fmt.Printf("Backing up %d databases...\n", len(databases))
		t.wg.Add(len(databases))
		for i := range databases {
			db := databases[i] // per-iteration copy for the closure below
			go t.pg.Do(func() {
				defer t.wg.Done()

				start := time.Now()
				log.Printf("Backup of database '%v' started", db)
				err := backupDatabase(db)
				if err != nil {
					log.Fatalf("Backup of database %v failed: %v\n", db, err)
				}
				log.Printf("Database %v backed up (%v)\n", db, time.Since(start))
			})
		}
		t.wg.Wait()
	} else {
		fmt.Println("Database backup disabled.")
	}

	t.wg.Add(len(t.shards))
	for i := range t.shards {
		si := t.shards[i] // per-iteration copy for the closure below
		go t.pg.Do(func() {
			defer func() {
				// Count the shard as completed even on the (fatal) error
				// path so StatusUpdate reflects progress accurately.
				atomic.AddUint64(&t.Stats.CompletedShards, 1)
				t.wg.Done()
			}()

			start := time.Now()
			log.Printf("Starting conversion of shard: %v", si.FullPath(t.opts.DataPath))
			if err := convertShard(si, t); err != nil {
				log.Fatalf("Failed to convert %v: %v\n", si.FullPath(t.opts.DataPath), err)
			}
			log.Printf("Conversion of %v successful (%v)\n", si.FullPath(t.opts.DataPath), time.Since(start))
		})
	}

	// Signal completion of all shard conversions on a channel so we can
	// select between "done" and the periodic status tick below.
	done := make(chan struct{})
	go func() {
		t.wg.Wait()
		close(done)
	}()

	// A ticker avoids allocating a fresh timer on every loop iteration,
	// unlike time.After.
	ticker := time.NewTicker(t.opts.UpdateInterval)
	defer ticker.Stop()

WAIT_LOOP:
	for {
		select {
		case <-done:
			break WAIT_LOOP
		case <-ticker.C:
			t.StatusUpdate()
		}
	}

	t.Stats.TotalTime = time.Since(conversionStart)

	return nil
}

// StatusUpdate logs a one-line progress report: shards completed so far out
// of the total, and the running points read/written counts. The counters are
// loaded atomically, so it is safe to call while conversions are in flight.
func (t *tracker) StatusUpdate() {
	completed := atomic.LoadUint64(&t.Stats.CompletedShards)
	read := atomic.LoadUint64(&t.Stats.PointsRead)
	written := atomic.LoadUint64(&t.Stats.PointsWritten)

	log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", completed, len(t.shards), read, written)
}

// PrintStats writes a human-readable summary of the completed conversion to
// stdout: shard/database counts, point counts, filtered-point counts, and
// disk-usage figures. Ratio lines are skipped when their denominator is zero
// (no shards converted, or no points written) to avoid a division-by-zero
// panic and meaningless NaN output.
func (t *tracker) PrintStats() {
	preSize := t.shards.Size()
	postSize := int64(t.Stats.TsmBytesWritten)
	pointsWritten := t.Stats.PointsWritten

	fmt.Printf("\nSummary statistics\n========================================\n")
	fmt.Printf("Databases converted:                 %d\n", len(t.shards.Databases()))
	fmt.Printf("Shards converted:                    %d\n", len(t.shards))
	fmt.Printf("TSM files created:                   %d\n", t.Stats.TsmFilesCreated)
	fmt.Printf("Points read:                         %d\n", t.Stats.PointsRead)
	fmt.Printf("Points written:                      %d\n", pointsWritten)
	fmt.Printf("NaN filtered:                        %d\n", t.Stats.NanFiltered)
	fmt.Printf("Inf filtered:                        %d\n", t.Stats.InfFiltered)
	fmt.Printf("Points without fields filtered:      %d\n", t.Stats.FieldsFiltered)
	fmt.Printf("Disk usage pre-conversion (bytes):   %d\n", preSize)
	fmt.Printf("Disk usage post-conversion (bytes):  %d\n", postSize)
	// Guard: integer division by zero panics when no data was converted.
	if preSize > 0 {
		fmt.Printf("Reduction factor:                    %d%%\n", 100*(preSize-postSize)/preSize)
	}
	// Guard: float division by zero would print "NaN" or "+Inf" here.
	if pointsWritten > 0 {
		fmt.Printf("Bytes per TSM point:                 %.2f\n", float64(postSize)/float64(pointsWritten))
	}
	fmt.Printf("Total conversion time:               %v\n", t.Stats.TotalTime)
	fmt.Println()
}