TUN-6963: Refactor Metrics service setup

This commit is contained in:
João Oliveirinha 2022-11-22 11:30:28 +00:00
parent c49621c723
commit af59851f33
3 changed files with 33 additions and 18 deletions

View File

@ -1,6 +1,7 @@
package proxydns
import (
"context"
"net"
"os"
"os/signal"
@ -73,7 +74,7 @@ func Run(c *cli.Context) error {
log.Fatal().Err(err).Msg("Failed to open the metrics listener")
}
go metrics.ServeMetrics(metricsListener, nil, nil, "", nil, log)
go metrics.ServeMetrics(metricsListener, context.Background(), metrics.Config{}, log)
listener, err := tunneldns.CreateListener(
c.String("address"),

View File

@ -372,7 +372,12 @@ func StartServer(
defer wg.Done()
readinessServer := metrics.NewReadyServer(log, clientID)
observer.RegisterSink(readinessServer)
errC <- metrics.ServeMetrics(metricsListener, ctx.Done(), readinessServer, quickTunnelURL, orchestrator, log)
metricsConfig := metrics.Config{
ReadyServer: readinessServer,
QuickTunnelHostname: quickTunnelURL,
Orchestrator: orchestrator,
}
errC <- metrics.ServeMetrics(metricsListener, ctx, metricsConfig, log)
}()
reconnectCh := make(chan supervisor.ReconnectSignal, c.Int("ha-connections"))

View File

@ -18,18 +18,24 @@ import (
)
const (
shutdownTimeout = time.Second * 15
startupTime = time.Millisecond * 500
startupTime = time.Millisecond * 500
defaultShutdownTimeout = time.Second * 15
)
// Config carries the optional collaborators and tuning knobs for the
// metrics HTTP server (see ServeMetrics/newMetricsHandler). The zero
// value is usable: nil/empty fields simply disable their endpoints.
type Config struct {
// ReadyServer, when non-nil, is mounted at /ready to report tunnel readiness.
ReadyServer *ReadyServer
// QuickTunnelHostname is echoed as JSON by the /quicktunnel endpoint
// (empty string produces {"hostname":""}).
QuickTunnelHostname string
// Orchestrator, when non-nil, enables the /config endpoint, which serves
// its versioned configuration JSON.
Orchestrator orchestrator
// ShutdownTimeout bounds graceful server shutdown after the context is
// cancelled; 0 falls back to defaultShutdownTimeout.
ShutdownTimeout time.Duration
}
// orchestrator is the minimal consumer-side interface the metrics server
// needs from the tunnel orchestrator: it supplies the current versioned
// configuration as JSON for the /config endpoint.
type orchestrator interface {
GetVersionedConfigJSON() ([]byte, error)
}
func newMetricsHandler(
readyServer *ReadyServer,
quickTunnelHostname string,
orchestrator orchestrator,
config Config,
log *zerolog.Logger,
) *mux.Router {
router := mux.NewRouter()
@ -39,15 +45,15 @@ func newMetricsHandler(
router.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) {
_, _ = fmt.Fprintf(w, "OK\n")
})
if readyServer != nil {
router.Handle("/ready", readyServer)
if config.ReadyServer != nil {
router.Handle("/ready", config.ReadyServer)
}
router.HandleFunc("/quicktunnel", func(w http.ResponseWriter, r *http.Request) {
_, _ = fmt.Fprintf(w, `{"hostname":"%s"}`, quickTunnelHostname)
_, _ = fmt.Fprintf(w, `{"hostname":"%s"}`, config.QuickTunnelHostname)
})
if orchestrator != nil {
if config.Orchestrator != nil {
router.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
json, err := orchestrator.GetVersionedConfigJSON()
json, err := config.Orchestrator.GetVersionedConfigJSON()
if err != nil {
w.WriteHeader(500)
_, _ = fmt.Fprintf(w, "ERR: %v", err)
@ -63,10 +69,8 @@ func newMetricsHandler(
func ServeMetrics(
l net.Listener,
shutdownC <-chan struct{},
readyServer *ReadyServer,
quickTunnelHostname string,
orchestrator orchestrator,
ctx context.Context,
config Config,
log *zerolog.Logger,
) (err error) {
var wg sync.WaitGroup
@ -74,7 +78,7 @@ func ServeMetrics(
trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true }
// TODO: parameterize ReadTimeout and WriteTimeout. The maximum time we can
// profile CPU usage depends on WriteTimeout
h := newMetricsHandler(readyServer, quickTunnelHostname, orchestrator, log)
h := newMetricsHandler(config, log)
server := &http.Server{
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
@ -91,7 +95,12 @@ func ServeMetrics(
// fully started up. So add artificial delay.
time.Sleep(startupTime)
<-shutdownC
<-ctx.Done()
shutdownTimeout := config.ShutdownTimeout
if shutdownTimeout == 0 {
shutdownTimeout = defaultShutdownTimeout
}
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
_ = server.Shutdown(ctx)
cancel()