TUN-7360: Add Get Host Details handler in management service
With the management tunnels work, we allow calls to our edge service using an access JWT provided by Tunnelstore. Given a connector ID, the request is then proxied to the appropriate Cloudflare Tunnel. This PR takes advantage of that flow and adds a new host_details endpoint. A call to this endpoint results in cloudflared gathering some details about the host: the hostname (via os.Hostname()) and the IP address (the local address of an outbound dial).

The mini spec lists four alternatives; this picks alternative 3 because:

1. Ease of implementation: it is quick and non-intrusive to any of our code paths. We expect to change how connection tracking works, and regardless of the direction we take, this should be easy to keep, morph, or throw away.
2. The cloudflared side of the round trip spends some time on the hostname call and the dial, but this is off the critical path and not an API that will be exercised often.
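The host-side gathering is deliberately small. As a rough standalone sketch of the approach (not the handler added in this commit verbatim): the hostname comes from os.Hostname(), and the IP is the local side of a short outbound dial, here against the same fallback address that the new service-op-ip flag defaults to.

// sketch.go: illustrates how host details can be derived.
// The dial target is an example fallback address; errors simply leave fields empty.
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
	"time"
)

type hostDetails struct {
	Hostname string `json:"hostname,omitempty"`
	IP       string `json:"ip,omitempty"`
}

func main() {
	var details hostDetails

	// Hostname straight from the OS.
	if hostname, err := os.Hostname(); err == nil {
		details.Hostname = hostname
	}

	// Preferred outbound IP: dial a reachable address and read the local
	// address of the connection; no request payload is ever sent.
	if conn, err := net.DialTimeout("tcp", "198.41.200.113:80", 1*time.Second); err == nil {
		defer conn.Close()
		if host, _, err := net.SplitHostPort(conn.LocalAddr().String()); err == nil {
			details.IP = host
		}
	}

	out, _ := json.Marshal(details)
	fmt.Println(string(out))
}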
parent 3996b1adca
commit 5e212a6bf3
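For reference, the end-to-end flow exercised by the component test below (management token from cloudflared tail token TUNNEL_ID, connector ID taken from the tunnel's connection list, then a request through the management hostname) amounts to a call like the following hedged sketch; the connector ID and JWT values are placeholders obtained out of band.

// query_host_details.go: illustrative client call against the new endpoint.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	connectorID := "<connector-id>" // placeholder: one of the tunnel's connector IDs
	accessJWT := "<access-jwt>"     // placeholder: output of cloudflared tail token TUNNEL_ID

	q := url.Values{}
	q.Set("connector_id", connectorID)
	q.Set("access_token", accessJWT)

	resp, err := http.Get("https://management.argotunnel.com/host_details?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Matches the getHostDetailsResponse shape defined in the handler below.
	var details struct {
		ConnectorID string `json:"connector_id"`
		IP          string `json:"ip"`
		Hostname    string `json:"hostname"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&details); err != nil {
		panic(err)
	}
	fmt.Printf("connector=%s host=%s ip=%s\n", details.ConnectorID, details.Hostname, details.IP)
}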
@@ -32,6 +32,38 @@ func Init(bi *cliutil.BuildInfo) {
}

func Command() *cli.Command {
	subcommands := []*cli.Command{
		buildTailManagementTokenSubcommand(),
	}

	return buildTailCommand(subcommands)
}

func buildTailManagementTokenSubcommand() *cli.Command {
	return &cli.Command{
		Name:        "token",
		Action:      cliutil.ConfiguredAction(managementTokenCommand),
		Usage:       "Get management access jwt",
		UsageText:   "cloudflared tail token TUNNEL_ID",
		Description: `Get management access jwt for a tunnel`,
		Hidden:      true,
	}
}

func managementTokenCommand(c *cli.Context) error {
	log := createLogger(c)
	token, err := getManagementToken(c, log)
	if err != nil {
		return err
	}
	var tokenResponse = struct {
		Token string `json:"token"`
	}{Token: token}

	return json.NewEncoder(os.Stdout).Encode(tokenResponse)
}

func buildTailCommand(subcommands []*cli.Command) *cli.Command {
	return &cli.Command{
		Name:   "tail",
		Action: Run,

@@ -87,6 +119,7 @@ func Command() *cli.Command {
				Value:   credentials.FindDefaultOriginCertPath(),
			},
		},
		Subcommands: subcommands,
	}
}
@@ -29,6 +29,7 @@ import (
	"github.com/cloudflare/cloudflared/config"
	"github.com/cloudflare/cloudflared/connection"
	"github.com/cloudflare/cloudflared/credentials"
	"github.com/cloudflare/cloudflared/edgediscovery"
	"github.com/cloudflare/cloudflared/features"
	"github.com/cloudflare/cloudflared/ingress"
	"github.com/cloudflare/cloudflared/logger"

@@ -399,7 +400,14 @@ func StartServer(
	localRules := []ingress.Rule{}
	if features.Contains(features.FeatureManagementLogs) {
-		mgmt := management.New(c.String("management-hostname"), logger.ManagementLogger.Log, logger.ManagementLogger)
		serviceIP := c.String("service-op-ip")
		if edgeAddrs, err := edgediscovery.ResolveEdge(log, tunnelConfig.Region, tunnelConfig.EdgeIPVersion); err == nil {
			if serviceAddr, err := edgeAddrs.GetAddrForRPC(); err == nil {
				serviceIP = serviceAddr.TCP.String()
			}
		}

+		mgmt := management.New(c.String("management-hostname"), serviceIP, clientID, logger.ManagementLogger.Log, logger.ManagementLogger)
		localRules = []ingress.Rule{ingress.NewManagementRule(mgmt)}
	}
	orchestrator, err := orchestration.NewOrchestrator(ctx, orchestratorConfig, tunnelConfig.Tags, localRules, tunnelConfig.Log)

@@ -907,6 +915,13 @@ func configureProxyFlags(shouldHide bool) []cli.Flag {
			Hidden:  true,
			Value:   "management.argotunnel.com",
		}),
		altsrc.NewStringFlag(&cli.StringFlag{
			Name:    "service-op-ip",
			Usage:   "Fallback IP for service operations run by the management service.",
			EnvVars: []string{"TUNNEL_SERVICE_OP_IP"},
			Hidden:  true,
			Value:   "198.41.200.113:80",
		}),
	}
	return append(flags, sshFlags(shouldHide)...)
}
@@ -28,6 +28,26 @@ class CloudflaredCli:
        listed = self._run_command(cmd_args, "list")
        return json.loads(listed.stdout)

    def get_management_token(self, config, config_path):
        basecmd = [config.cloudflared_binary]
        if config_path is not None:
            basecmd += ["--config", str(config_path)]
        origincert = get_config_from_file()["origincert"]
        if origincert:
            basecmd += ["--origincert", origincert]

        cmd_args = ["tail", "token", config.get_tunnel_id()]
        cmd = basecmd + cmd_args
        result = run_subprocess(cmd, "token", self.logger, check=True, capture_output=True, timeout=15)
        return json.loads(result.stdout.decode("utf-8").strip())["token"]

    def get_connector_id(self, config):
        op = self.get_tunnel_info(config.get_tunnel_id())
        connectors = []
        for conn in op["conns"]:
            connectors.append(conn["id"])
        return connectors

    def get_tunnel_info(self, tunnel_id):
        info = self._run_command(["info", "--output", "json", tunnel_id], "info")
        return json.loads(info.stdout)
@@ -0,0 +1,8 @@
cloudflared_binary: "cloudflared"
tunnel: "ae21a96c-24d1-4ce8-a6ba-962cba5976d3"
credentials_file: "/Users/sudarsan/.cloudflared/ae21a96c-24d1-4ce8-a6ba-962cba5976d3.json"
origincert: "/Users/sudarsan/.cloudflared/cert.pem"
ingress:
- hostname: named-tunnel-component-tests.example.com
  service: hello_world
- service: http_status:404
@@ -4,6 +4,7 @@ BACKOFF_SECS = 7
MAX_LOG_LINES = 50

PROXY_DNS_PORT = 9053
MANAGEMENT_HOST_NAME = "https://management.argotunnel.com"


def protocols():
@@ -1,9 +1,11 @@
#!/usr/bin/env python
import requests
from conftest import CfdModes
-from constants import METRICS_PORT, MAX_RETRIES, BACKOFF_SECS
+from constants import METRICS_PORT, MAX_RETRIES, BACKOFF_SECS, MANAGEMENT_HOST_NAME
from retrying import retry
-from util import LOGGER, start_cloudflared, wait_tunnel_ready, send_requests
+from cli import CloudflaredCli
+from util import LOGGER, write_config, start_cloudflared, wait_tunnel_ready, send_requests
+import platform

class TestTunnel:
    '''Test tunnels with no ingress rules from config.yaml but ingress rules from CLI only'''
@@ -14,6 +16,39 @@ class TestTunnel:
        with start_cloudflared(tmp_path, config, cfd_args=["run", "--hello-world"], new_process=True):
            wait_tunnel_ready(tunnel_url=config.get_url(),
                              require_min_connections=4)

    """
    test_get_host_details does the following:
    1. It gets a management token from Tunnelstore using cloudflared tail token <tunnel_id>
    2. It gets the connector_id after starting a cloudflare tunnel
    3. It sends a request to the management host with the connector_id and management token
    4. Asserts that the response has a hostname and ip.
    """
    def test_get_host_details(self, tmp_path, component_tests_config):
        # TUN-7377 : wait_tunnel_ready does not work properly in windows.
        # Skipping this test for windows for now and will address it as part of tun-7377
        if platform.system() == "Windows":
            return
        config = component_tests_config(cfd_mode=CfdModes.NAMED, run_proxy_dns=False, provide_ingress=False)
        LOGGER.debug(config)
        headers = {}
        headers["Content-Type"] = "application/json"
        config_path = write_config(tmp_path, config.full_config)
        with start_cloudflared(tmp_path, config, cfd_args=["run", "--hello-world"], new_process=True):
            wait_tunnel_ready(tunnel_url=config.get_url(),
                              require_min_connections=4)
            cfd_cli = CloudflaredCli(config, config_path, LOGGER)
            access_jwt = cfd_cli.get_management_token(config, config_path)
            connector_id = cfd_cli.get_connector_id(config)[0]
            url = f"{MANAGEMENT_HOST_NAME}/host_details?connector_id={connector_id}&access_token={access_jwt}"
            resp = send_request(url, headers=headers)

            # Assert response json.
            assert resp.status_code == 200, "Expected cloudflared to return 200 for host details"
            assert resp.json()["hostname"] != "", "Expected cloudflared to return hostname"
            assert resp.json()["ip"] != "", "Expected cloudflared to return ip"
            assert resp.json()["connector_id"] == connector_id, "Expected cloudflared to return connector_id"


    def test_tunnel_url(self, tmp_path, component_tests_config):
        config = component_tests_config(cfd_mode=CfdModes.NAMED, run_proxy_dns=False, provide_ingress=False)

@@ -38,6 +73,6 @@ class TestTunnel:


@retry(stop_max_attempt_number=MAX_RETRIES, wait_fixed=BACKOFF_SECS * 1000)
-def send_request(url):
+def send_request(url, headers={}):
    with requests.Session() as s:
-        return s.get(url, timeout=BACKOFF_SECS)
+        return s.get(url, timeout=BACKOFF_SECS, headers=headers)
@@ -9,6 +9,7 @@ import pytest

import requests
import yaml
+import json
from retrying import retry

from constants import METRICS_PORT, MAX_RETRIES, BACKOFF_SECS

@@ -48,7 +49,6 @@ def start_cloudflared(directory, config, cfd_args=["run"], cfd_pre_args=["tunnel
    # By setting check=True, it will raise an exception if the process exits with non-zero exit code
    return subprocess.run(cmd, check=expect_success, capture_output=capture_output)


def cloudflared_cmd(config, config_path, cfd_args, cfd_pre_args, root):
    cmd = []
    if root:

@@ -106,13 +106,14 @@ def inner_wait_tunnel_ready(tunnel_url=None, require_min_connections=1):
    with requests.Session() as s:
        resp = send_request(s, metrics_url, True)

-        assert resp.json()["readyConnections"] >= require_min_connections, \
+        ready_connections = resp.json()["readyConnections"]

+        assert ready_connections >= require_min_connections, \
            f"Ready endpoint returned {resp.json()} but we expect at least {require_min_connections} connections"

        if tunnel_url is not None:
            send_request(s, tunnel_url, True)


def _log_cloudflared_logs(cfd_logs):
    log_file = cfd_logs
    if os.path.isdir(cfd_logs):
@@ -2,12 +2,15 @@ package management
import (
	"context"
	"net"
	"net/http"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/google/uuid"
	"github.com/rs/zerolog"
	"nhooyr.io/websocket"
)
@@ -29,6 +32,9 @@ type ManagementService struct {
	// The management tunnel hostname
	Hostname string

	serviceIP string
	clientID  uuid.UUID

	log    *zerolog.Logger
	router chi.Router
@@ -42,16 +48,24 @@ type ManagementService struct {
	logger LoggerListener
}

-func New(managementHostname string, log *zerolog.Logger, logger LoggerListener) *ManagementService {
+func New(managementHostname string,
+	serviceIP string,
+	clientID uuid.UUID,
+	log *zerolog.Logger,
+	logger LoggerListener,
+) *ManagementService {
	s := &ManagementService{
-		Hostname: managementHostname,
-		log:      log,
-		logger:   logger,
+		Hostname:  managementHostname,
+		log:       log,
+		logger:    logger,
+		serviceIP: serviceIP,
+		clientID:  clientID,
	}
	r := chi.NewRouter()
	r.Get("/ping", ping)
	r.Head("/ping", ping)
	r.Get("/logs", s.logs)
	r.Get("/host_details", s.getHostDetails)
	s.router = r
	return s
}
@@ -65,6 +79,42 @@ func ping(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(200)
}

// The response provided by the /host_details endpoint
type getHostDetailsResponse struct {
	ClientID string `json:"connector_id"`
	IP       string `json:"ip,omitempty"`
	HostName string `json:"hostname,omitempty"`
}

func (m *ManagementService) getHostDetails(w http.ResponseWriter, r *http.Request) {

	var getHostDetailsResponse = getHostDetailsResponse{
		ClientID: m.clientID.String(),
	}
	if ip, err := getPrivateIP(m.serviceIP); err == nil {
		getHostDetailsResponse.IP = ip
	}
	if hostname, err := os.Hostname(); err == nil {
		getHostDetailsResponse.HostName = hostname
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(200)
	json.NewEncoder(w).Encode(getHostDetailsResponse)
}

// Get preferred private ip of this machine
func getPrivateIP(addr string) (string, error) {
	conn, err := net.DialTimeout("tcp", addr, 1*time.Second)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	localAddr := conn.LocalAddr().String()
	host, _, err := net.SplitHostPort(localAddr)
	return host, err
}

// readEvents will loop through all incoming websocket messages from a client and marshal them into the
// proper Event structure and pass through to the events channel. Any invalid messages sent will automatically
// terminate the connection.
@@ -14,6 +14,7 @@ import (
	"time"

	"github.com/gobwas/ws/wsutil"
	"github.com/google/uuid"
	gows "github.com/gorilla/websocket"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/require"
@@ -50,7 +51,7 @@ func TestUpdateConfiguration(t *testing.T) {
	initConfig := &Config{
		Ingress: &ingress.Ingress{},
	}
-	orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{ingress.NewManagementRule(management.New("management.argotunnel.com", &testLogger, nil))}, &testLogger)
+	orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{ingress.NewManagementRule(management.New("management.argotunnel.com", "1.1.1.1:80", uuid.Nil, &testLogger, nil))}, &testLogger)
	require.NoError(t, err)
	initOriginProxy, err := orchestrator.GetOriginProxy()
	require.NoError(t, err)