This file is indexed.

/usr/share/gocode/src/github.com/docker/go-events/retry.go is in golang-github-docker-go-events-dev 0.0~git20160331.0.882f161-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

package events

import (
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/Sirupsen/logrus"
)

// RetryingSink retries the write until success or an ErrSinkClosed is
// returned. The underlying sink must have a non-zero probability of
// succeeding, or the write will block. Retry behavior is configured with a
// RetryStrategy. Concurrent calls to a retrying sink are serialized through
// the sink, meaning that if one is in-flight, another will not proceed.
type RetryingSink struct {
	sink     Sink
	strategy RetryStrategy
	closed   chan struct{}
}

// NewRetryingSink returns a sink that will retry writes to the provided sink,
// backing off on failure. The supplied RetryStrategy controls how long to wait
// between attempts and whether a failing event should eventually be dropped.
func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
	rs := &RetryingSink{
		sink:     sink,
		strategy: strategy,
		closed:   make(chan struct{}),
	}

	return rs
}

// Write attempts to flush the event to the downstream sink until it succeeds
// or the sink is closed.
func (rs *RetryingSink) Write(event Event) error {
	logger := logrus.WithField("event", event)

retry:
	select {
	case <-rs.closed:
		return ErrSinkClosed
	default:
	}

	if backoff := rs.strategy.Proceed(event); backoff > 0 {
		select {
		case <-time.After(backoff):
			// TODO(stevvooe): This branch holds up the next try. Before, we
			// would simply break to the "retry" label and then possibly wait
			// again. However, this requires all retry strategies to have a
			// large probability of probing the sink for success, rather than
			// just backing off and sending the request.
		case <-rs.closed:
			return ErrSinkClosed
		}
	}

	if err := rs.sink.Write(event); err != nil {
		if err == ErrSinkClosed {
			// terminal!
			return err
		}

		logger := logger.WithError(err) // shadow!!

		if rs.strategy.Failure(event, err) {
			logger.Errorf("retryingsink: dropped event")
			return nil
		}

		logger.Errorf("retryingsink: error writing event, retrying")
		goto retry
	}

	rs.strategy.Success(event)
	return nil
}

// Close closes the retrying sink and its underlying sink.
func (rs *RetryingSink) Close() error {
	select {
	case <-rs.closed:
		return ErrSinkClosed
	default:
		close(rs.closed)
		return rs.sink.Close()
	}
}
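
// exampleRetryingWrite is an illustrative sketch, not part of the upstream
// file: it shows the intended call pattern for a RetryingSink wrapped around
// a circuit-breaker strategy. The dst and ev arguments are assumed to be
// supplied by the caller.
func exampleRetryingWrite(dst Sink, ev Event) error {
	// Trip the breaker after 5 consecutive failures, then wait 100ms before
	// probing the underlying sink again.
	rs := NewRetryingSink(dst, NewBreaker(5, 100*time.Millisecond))

	// Write blocks, retrying per the strategy, until the event is accepted,
	// the strategy drops it (Write then returns nil), or the sink is closed
	// (ErrSinkClosed).
	if err := rs.Write(ev); err != nil {
		return err
	}

	// Closing the retrying sink also closes the underlying sink.
	return rs.Close()
}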

// RetryStrategy defines a strategy for retrying event sink writes.
//
// All methods should be goroutine safe.
type RetryStrategy interface {
	// Proceed is called before every event send. If Proceed returns a
	// positive, non-zero duration, the retryer will back off for that
	// duration before attempting the write.
	//
	// An event is provided, but may be ignored.
	Proceed(event Event) time.Duration

	// Failure reports a failure to the strategy. If this method returns true,
	// the event should be dropped.
	Failure(event Event, err error) bool

	// Success should be called when an event is sent successfully.
	Success(event Event)
}
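
// fixedDelay is an illustrative sketch of a custom RetryStrategy; it is not
// part of the upstream package. Every failed write is retried after the same
// delay, and events are never dropped.
type fixedDelay struct {
	delay    time.Duration
	failures uint64 // consecutive failure counter, accessed atomically.
}

var _ RetryStrategy = &fixedDelay{}

// Proceed returns the configured delay while a failure is outstanding.
func (f *fixedDelay) Proceed(event Event) time.Duration {
	if atomic.LoadUint64(&f.failures) == 0 {
		return 0
	}
	return f.delay
}

// Failure records the failure and keeps the event in play.
func (f *fixedDelay) Failure(event Event, err error) bool {
	atomic.AddUint64(&f.failures, 1)
	return false
}

// Success clears the failure counter.
func (f *fixedDelay) Success(event Event) {
	atomic.StoreUint64(&f.failures, 0)
}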

// Breaker implements a circuit breaker retry strategy.
//
// The current implementation never drops events.
type Breaker struct {
	threshold int
	recent    int
	last      time.Time
	backoff   time.Duration // time after which we retry after failure.
	mu        sync.Mutex
}

var _ RetryStrategy = &Breaker{}

// NewBreaker returns a breaker that will back off after the failure threshold
// has been tripped. A Breaker is thread safe and may be shared by many
// goroutines.
func NewBreaker(threshold int, backoff time.Duration) *Breaker {
	return &Breaker{
		threshold: threshold,
		backoff:   backoff,
	}
}

// Proceed checks the failures against the threshold.
func (b *Breaker) Proceed(event Event) time.Duration {
	b.mu.Lock()
	defer b.mu.Unlock()

	if b.recent < b.threshold {
		return 0
	}

	return b.last.Add(b.backoff).Sub(time.Now())
}

// Success resets the breaker.
func (b *Breaker) Success(event Event) {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.recent = 0
	b.last = time.Time{}
}

// Failure records the failure and latest failure time.
func (b *Breaker) Failure(event Event, err error) bool {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.recent++
	b.last = time.Now().UTC()
	return false // never drop events.
}
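
// As a hedged illustration of the breaker's behavior (not part of the upstream
// documentation): with NewBreaker(3, time.Second), writes are attempted
// immediately until three consecutive failures have been recorded. After the
// third failure at time t, Proceed reports the time remaining until t+1s; once
// that window has passed, or after any Success, writes proceed without delay.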

var (
	// DefaultExponentialBackoffConfig provides a default configuration for
	// exponential backoff.
	DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
		Base:   time.Second,
		Factor: time.Second,
		Max:    20 * time.Second,
	}
)

// ExponentialBackoffConfig configures backoff parameters.
//
// Note that these parameters operate on the upper bound for choosing a random
// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
// the backoff value.
type ExponentialBackoffConfig struct {
	// Base is the minimum bound for backing off after failure.
	Base time.Duration

	// Factor sets the amount of time by which the backoff grows with each
	// failure.
	Factor time.Duration

	// Max is the absolute maximum bound for a single backoff.
	Max time.Duration
}

// ExponentialBackoff implements random backoff with exponentially increasing
// bounds as the number of consecutive failures increases.
type ExponentialBackoff struct {
	config   ExponentialBackoffConfig
	failures uint64 // consecutive failure counter.
}

// NewExponentialBackoff returns an exponential backoff strategy with the
// desired config. If Factor or Max are zero, the corresponding values from
// DefaultExponentialBackoffConfig are used.
func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
	return &ExponentialBackoff{
		config: config,
	}
}
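
// exampleExponentialSink is an illustrative sketch, not part of the upstream
// file: it wires an ExponentialBackoff strategy into a RetryingSink. The
// config values below are arbitrary; the backoff bound starts near Base+Factor
// and roughly doubles with each consecutive failure, capped at Max.
func exampleExponentialSink(dst Sink) *RetryingSink {
	strategy := NewExponentialBackoff(ExponentialBackoffConfig{
		Base:   500 * time.Millisecond,
		Factor: time.Second,
		Max:    30 * time.Second,
	})
	return NewRetryingSink(dst, strategy)
}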

// Proceed returns the next randomly bound exponential backoff time.
func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
	return b.backoff(atomic.LoadUint64(&b.failures))
}

// Success resets the failures counter.
func (b *ExponentialBackoff) Success(event Event) {
	atomic.StoreUint64(&b.failures, 0)
}

// Failure increments the failure counter.
func (b *ExponentialBackoff) Failure(event Event, err error) bool {
	atomic.AddUint64(&b.failures, 1)
	return false
}

// backoff calculates the amount of time to wait based on the number of
// consecutive failures.
func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
	if failures <= 0 {
		// proceed normally when there are no failures.
		return 0
	}

	factor := b.config.Factor
	if factor <= 0 {
		factor = DefaultExponentialBackoffConfig.Factor
	}

	backoff := b.config.Base + factor*time.Duration(1<<(failures-1))

	max := b.config.Max
	if max <= 0 {
		max = DefaultExponentialBackoffConfig.Max
	}

	if backoff > max || backoff < 0 {
		backoff = max
	}

	// Choose a uniformly distributed value from [0, backoff).
	return time.Duration(rand.Int63n(int64(backoff)))
}
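
// Worked example (a sketch assuming DefaultExponentialBackoffConfig, i.e.
// Base=1s, Factor=1s, Max=20s): the bound computed above is
// Base + Factor*2^(failures-1), so consecutive failures produce bounds of
// 2s, 3s, 5s, 9s, 17s, and then the 20s cap; Proceed returns a uniformly
// distributed duration in [0, bound).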