package retry

import (
	"context"
	"math/rand"
	"time"
)

const (
	DefaultBaseTime time.Duration = time.Second
)

// Redeclare time functions so they can be overridden in tests.
type Clock struct {
	Now   func() time.Time
	After func(d time.Duration) <-chan time.Time
}
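
// A minimal sketch of overriding the clock in a test; the fixed instant and
// the immediately-firing After stub below are illustrative, not part of this
// package:
//
//	fixed := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
//	b := NewBackoff(3, time.Second, false)
//	b.Clock = Clock{
//		Now: func() time.Time { return fixed },
//		After: func(d time.Duration) <-chan time.Time {
//			c := make(chan time.Time, 1)
//			c <- fixed.Add(d) // deliver immediately so tests never sleep
//			return c
//		},
//	}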

// BackoffHandler manages exponential backoff and limits the maximum number of retries.
// The base time period is 1 second, doubling with each retry.
// After initial success, a grace period can be set to reset the backoff timer if
// a connection is maintained successfully for a long enough period. The base grace period
// is 2 seconds, doubling with each retry.
type BackoffHandler struct {
	// maxRetries sets the maximum number of retries to perform. The default value
	// of 0 disables retry completely.
	maxRetries uint
	// retryForever caps the exponential backoff period according to maxRetries
	// but allows you to retry indefinitely.
	retryForever bool
	// baseTime sets the initial backoff period.
	baseTime time.Duration

	retries       uint
	resetDeadline time.Time

	Clock Clock
}
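
// A small configuration sketch (values chosen purely for illustration): the
// first handler gives up after five retries, while the second retries
// indefinitely but never backs off beyond the five-retry cap:
//
//	bounded := NewBackoff(5, time.Second, false)
//	unbounded := NewBackoff(5, time.Second, true)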

// NewBackoff creates a BackoffHandler that uses the real wall clock.
func NewBackoff(maxRetries uint, baseTime time.Duration, retryForever bool) BackoffHandler {
	return BackoffHandler{
		maxRetries:   maxRetries,
		baseTime:     baseTime,
		retryForever: retryForever,
		Clock:        Clock{Now: time.Now, After: time.After},
	}
}
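
// Note on defaults: a zero baseTime falls back to DefaultBaseTime (see
// GetBaseTime below), so these two handlers behave identically; the values
// are illustrative only:
//
//	a := NewBackoff(3, 0, false)
//	b := NewBackoff(3, time.Second, false)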

func (b BackoffHandler) GetMaxBackoffDuration(ctx context.Context) (time.Duration, bool) {
	// Follows the same logic as Backoff, but without mutating the receiver.
	// This select has to happen first to reflect the actual behaviour of the Backoff function.
	select {
	case <-ctx.Done():
		return time.Duration(0), false
	default:
	}
	if !b.resetDeadline.IsZero() && b.Clock.Now().After(b.resetDeadline) {
		// b.retries would be set to 0 at this point
		return time.Second, true
	}
	if b.retries >= b.maxRetries && !b.retryForever {
		return time.Duration(0), false
	}
	maxTimeToWait := b.GetBaseTime() * 1 << (b.retries + 1)
	return maxTimeToWait, true
}
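
// For example, with the default 1 second base and b.retries == 2, the
// computed cap is 1s << 3 = 8s: the next retry would wait at most 8 seconds.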

// BackoffTimer returns a channel that sends the current time when the exponential backoff timeout expires.
// Returns nil if the maximum number of retries has been used.
func (b *BackoffHandler) BackoffTimer() <-chan time.Time {
	if !b.resetDeadline.IsZero() && b.Clock.Now().After(b.resetDeadline) {
		b.retries = 0
		b.resetDeadline = time.Time{}
	}
	if b.retries >= b.maxRetries {
		if !b.retryForever {
			return nil
		}
	} else {
		b.retries++
	}
	maxTimeToWait := time.Duration(b.GetBaseTime() * 1 << (b.retries))
	timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
	return b.Clock.After(timeToWait)
}
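
// With the default 1 second base, the first retry is capped at 2s, the second
// at 4s, the third at 8s, and so on; each actual wait is drawn uniformly from
// [0, cap) by rand.Int63n, so the delays are jittered rather than fixed.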

// Backoff is used to wait according to exponential backoff. Returns false if the
// maximum number of retries has been used or if the underlying context has been cancelled.
func (b *BackoffHandler) Backoff(ctx context.Context) bool {
	c := b.BackoffTimer()
	if c == nil {
		return false
	}
	select {
	case <-c:
		return true
	case <-ctx.Done():
		return false
	}
}
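
// A minimal usage sketch; connect, errGiveUp and the surrounding control flow
// are hypothetical stand-ins for whatever operation is being retried:
//
//	backoff := NewBackoff(5, time.Second, false)
//	for {
//		if err := connect(ctx); err == nil {
//			break
//		}
//		if !backoff.Backoff(ctx) {
//			return errGiveUp // retries exhausted or ctx cancelled
//		}
//	}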

// SetGracePeriod sets a grace period within which the backoff timer is maintained. After the grace
// period expires, the number of retries and backoff duration are reset.
func (b *BackoffHandler) SetGracePeriod() time.Duration {
	maxTimeToWait := b.GetBaseTime() * 2 << (b.retries + 1)
	timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
	b.resetDeadline = b.Clock.Now().Add(timeToWait)

	return timeToWait
}
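
// As a worked example with the default 1 second base and b.retries == 2, the
// cap above is 2s << 3 = 16s and the grace period is drawn uniformly from
// [0, 16s). A caller would typically call SetGracePeriod once a connection is
// established, so that a connection which outlives the grace period resets
// the backoff state.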

// GetBaseTime returns the initial backoff period, falling back to
// DefaultBaseTime when none was set.
func (b BackoffHandler) GetBaseTime() time.Duration {
	if b.baseTime == 0 {
		return DefaultBaseTime
	}
	return b.baseTime
}

// Retries returns the number of retries consumed so far.
func (b *BackoffHandler) Retries() int {
	return int(b.retries)
}

// ReachedMaxRetries reports whether the retry count has reached maxRetries.
func (b *BackoffHandler) ReachedMaxRetries() bool {
	return b.retries == b.maxRetries
}

// ResetNow clears the retry count and moves the reset deadline to the current
// time, so the next backoff starts again from the base period.
func (b *BackoffHandler) ResetNow() {
	b.resetDeadline = b.Clock.Now()
	b.retries = 0
}