TUN-6963: Refactor Metrics service setup
commit af59851f33
parent c49621c723
@@ -1,6 +1,7 @@
 package proxydns
 
 import (
+	"context"
 	"net"
 	"os"
 	"os/signal"
@@ -73,7 +74,7 @@ func Run(c *cli.Context) error {
 		log.Fatal().Err(err).Msg("Failed to open the metrics listener")
 	}
 
-	go metrics.ServeMetrics(metricsListener, nil, nil, "", nil, log)
+	go metrics.ServeMetrics(metricsListener, context.Background(), metrics.Config{}, log)
 
 	listener, err := tunneldns.CreateListener(
 		c.String("address"),
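
For callers that need none of the optional handlers, the new signature collapses to a context plus a zero-value Config, as the proxydns change above shows. A minimal standalone sketch of that call (the listener address and logger setup are illustrative, not taken from the commit):

package main

import (
	"context"
	"net"
	"os"

	"github.com/cloudflare/cloudflared/metrics"
	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stderr)

	// Illustrative address; the real command reads it from a CLI flag.
	listener, err := net.Listen("tcp", "localhost:2000")
	if err != nil {
		logger.Fatal().Err(err).Msg("Failed to open the metrics listener")
	}

	// A zero-value Config disables the /ready and /config handlers and
	// leaves the quick-tunnel hostname empty; real callers run this in a
	// goroutine.
	if err := metrics.ServeMetrics(listener, context.Background(), metrics.Config{}, &logger); err != nil {
		logger.Fatal().Err(err).Msg("metrics server failed")
	}
}
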
@@ -372,7 +372,12 @@ func StartServer(
 		defer wg.Done()
 		readinessServer := metrics.NewReadyServer(log, clientID)
 		observer.RegisterSink(readinessServer)
-		errC <- metrics.ServeMetrics(metricsListener, ctx.Done(), readinessServer, quickTunnelURL, orchestrator, log)
+		metricsConfig := metrics.Config{
+			ReadyServer:         readinessServer,
+			QuickTunnelHostname: quickTunnelURL,
+			Orchestrator:        orchestrator,
+		}
+		errC <- metrics.ServeMetrics(metricsListener, ctx, metricsConfig, log)
 	}()
 
 	reconnectCh := make(chan supervisor.ReconnectSignal, c.Int("ha-connections"))
@@ -18,18 +18,24 @@ import (
 )
 
 const (
-	shutdownTimeout = time.Second * 15
-	startupTime     = time.Millisecond * 500
+	startupTime            = time.Millisecond * 500
+	defaultShutdownTimeout = time.Second * 15
 )
 
+type Config struct {
+	ReadyServer         *ReadyServer
+	QuickTunnelHostname string
+	Orchestrator        orchestrator
+
+	ShutdownTimeout time.Duration
+}
+
 type orchestrator interface {
 	GetVersionedConfigJSON() ([]byte, error)
 }
 
 func newMetricsHandler(
-	readyServer *ReadyServer,
-	quickTunnelHostname string,
-	orchestrator orchestrator,
+	config Config,
 	log *zerolog.Logger,
 ) *mux.Router {
 	router := mux.NewRouter()
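
Since newMetricsHandler now takes the whole Config, the optional endpoints can be exercised by filling only the fields of interest. A hypothetical in-package test sketch (not part of the commit) that checks the /quicktunnel wiring through the new struct:

package metrics

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/rs/zerolog"
)

func TestQuickTunnelHandlerFromConfig(t *testing.T) {
	logger := zerolog.Nop()
	// Only QuickTunnelHostname is set; ReadyServer and Orchestrator stay
	// nil, so /ready and /config are simply not registered.
	handler := newMetricsHandler(Config{QuickTunnelHostname: "example.com"}, &logger)

	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/quicktunnel", nil))

	if got, want := rec.Body.String(), `{"hostname":"example.com"}`; got != want {
		t.Fatalf("unexpected /quicktunnel body: got %q, want %q", got, want)
	}
}
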
@@ -39,15 +45,15 @@ func newMetricsHandler(
 	router.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) {
 		_, _ = fmt.Fprintf(w, "OK\n")
 	})
-	if readyServer != nil {
-		router.Handle("/ready", readyServer)
+	if config.ReadyServer != nil {
+		router.Handle("/ready", config.ReadyServer)
 	}
 	router.HandleFunc("/quicktunnel", func(w http.ResponseWriter, r *http.Request) {
-		_, _ = fmt.Fprintf(w, `{"hostname":"%s"}`, quickTunnelHostname)
+		_, _ = fmt.Fprintf(w, `{"hostname":"%s"}`, config.QuickTunnelHostname)
 	})
-	if orchestrator != nil {
+	if config.Orchestrator != nil {
 		router.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
-			json, err := orchestrator.GetVersionedConfigJSON()
+			json, err := config.Orchestrator.GetVersionedConfigJSON()
 			if err != nil {
 				w.WriteHeader(500)
 				_, _ = fmt.Fprintf(w, "ERR: %v", err)
@ -63,10 +69,8 @@ func newMetricsHandler(
|
||||||
|
|
||||||
func ServeMetrics(
|
func ServeMetrics(
|
||||||
l net.Listener,
|
l net.Listener,
|
||||||
shutdownC <-chan struct{},
|
ctx context.Context,
|
||||||
readyServer *ReadyServer,
|
config Config,
|
||||||
quickTunnelHostname string,
|
|
||||||
orchestrator orchestrator,
|
|
||||||
log *zerolog.Logger,
|
log *zerolog.Logger,
|
||||||
) (err error) {
|
) (err error) {
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
|
@ -74,7 +78,7 @@ func ServeMetrics(
|
||||||
trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true }
|
trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true }
|
||||||
// TODO: parameterize ReadTimeout and WriteTimeout. The maximum time we can
|
// TODO: parameterize ReadTimeout and WriteTimeout. The maximum time we can
|
||||||
// profile CPU usage depends on WriteTimeout
|
// profile CPU usage depends on WriteTimeout
|
||||||
h := newMetricsHandler(readyServer, quickTunnelHostname, orchestrator, log)
|
h := newMetricsHandler(config, log)
|
||||||
server := &http.Server{
|
server := &http.Server{
|
||||||
ReadTimeout: 10 * time.Second,
|
ReadTimeout: 10 * time.Second,
|
||||||
WriteTimeout: 10 * time.Second,
|
WriteTimeout: 10 * time.Second,
|
||||||
|
@ -91,7 +95,12 @@ func ServeMetrics(
|
||||||
// fully started up. So add artificial delay.
|
// fully started up. So add artificial delay.
|
||||||
time.Sleep(startupTime)
|
time.Sleep(startupTime)
|
||||||
|
|
||||||
<-shutdownC
|
<-ctx.Done()
|
||||||
|
shutdownTimeout := config.ShutdownTimeout
|
||||||
|
if shutdownTimeout == 0 {
|
||||||
|
shutdownTimeout = defaultShutdownTimeout
|
||||||
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
|
||||||
_ = server.Shutdown(ctx)
|
_ = server.Shutdown(ctx)
|
||||||
cancel()
|
cancel()
|
||||||
|
|
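
The shutdown path now keys off the context rather than a dedicated channel, and ShutdownTimeout is optional: a zero value falls back to defaultShutdownTimeout (15 seconds). A sketch of a caller overriding the timeout and stopping the server by cancelling the context (the listener, logger, and sleep are illustrative):

package main

import (
	"context"
	"net"
	"os"
	"time"

	"github.com/cloudflare/cloudflared/metrics"
	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stderr)

	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		logger.Fatal().Err(err).Msg("Failed to open the metrics listener")
	}

	ctx, cancel := context.WithCancel(context.Background())

	errC := make(chan error, 1)
	go func() {
		// A non-zero ShutdownTimeout overrides the 15s default used when
		// the field is left at its zero value.
		errC <- metrics.ServeMetrics(listener, ctx, metrics.Config{
			ShutdownTimeout: 5 * time.Second,
		}, &logger)
	}()

	// Cancelling the context is the only shutdown signal; ServeMetrics
	// then calls server.Shutdown with the configured timeout.
	time.Sleep(time.Second)
	cancel()
	<-errC
}
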