TUN-7463: Add default ingress rule if no ingress rules are provided when updating the configuration

This commit is contained in:
João "Pisco" Fernandes 2023-06-09 18:17:04 +01:00
parent 58b27a1ccf
commit 925ec100d6
4 changed files with 63 additions and 27 deletions

View File

@ -399,7 +399,7 @@ func StartServer(
} }
} }
localRules := []ingress.Rule{} internalRules := []ingress.Rule{}
if features.Contains(features.FeatureManagementLogs) { if features.Contains(features.FeatureManagementLogs) {
serviceIP := c.String("service-op-ip") serviceIP := c.String("service-op-ip")
if edgeAddrs, err := edgediscovery.ResolveEdge(log, tunnelConfig.Region, tunnelConfig.EdgeIPVersion); err == nil { if edgeAddrs, err := edgediscovery.ResolveEdge(log, tunnelConfig.Region, tunnelConfig.EdgeIPVersion); err == nil {
@ -416,9 +416,9 @@ func StartServer(
logger.ManagementLogger.Log, logger.ManagementLogger.Log,
logger.ManagementLogger, logger.ManagementLogger,
) )
localRules = []ingress.Rule{ingress.NewManagementRule(mgmt)} internalRules = []ingress.Rule{ingress.NewManagementRule(mgmt)}
} }
orchestrator, err := orchestration.NewOrchestrator(ctx, orchestratorConfig, tunnelConfig.Tags, localRules, tunnelConfig.Log) orchestrator, err := orchestration.NewOrchestrator(ctx, orchestratorConfig, tunnelConfig.Tags, internalRules, tunnelConfig.Log)
if err != nil { if err != nil {
return err return err
} }

View File

@ -44,7 +44,7 @@ func (ing Ingress) FindMatchingRule(hostname, path string) (*Rule, int) {
if err == nil { if err == nil {
hostname = host hostname = host
} }
for i, rule := range ing.LocalRules { for i, rule := range ing.InternalRules {
if rule.Matches(hostname, path) { if rule.Matches(hostname, path) {
// A local rule match returns a negative rule index to distinguish local rules from user-defined rules in logs // A local rule match returns a negative rule index to distinguish local rules from user-defined rules in logs
// Full range would be [-1 .. ) // Full range would be [-1 .. )
@ -77,7 +77,7 @@ func matchHost(ruleHost, reqHost string) bool {
// Ingress maps eyeball requests to origins. // Ingress maps eyeball requests to origins.
type Ingress struct { type Ingress struct {
// Set of ingress rules that are not added to remote config, e.g. management // Set of ingress rules that are not added to remote config, e.g. management
LocalRules []Rule InternalRules []Rule
// Rules that are provided by the user from remote or local configuration // Rules that are provided by the user from remote or local configuration
Rules []Rule `json:"ingress"` Rules []Rule `json:"ingress"`
Defaults OriginRequestConfig `json:"originRequest"` Defaults OriginRequestConfig `json:"originRequest"`
@ -110,16 +110,18 @@ func ParseIngressFromConfigAndCLI(conf *config.Configuration, c *cli.Context, lo
// --bastion for ssh bastion service // --bastion for ssh bastion service
ingressRules, err = parseCLIIngress(c, false) ingressRules, err = parseCLIIngress(c, false)
if errors.Is(err, ErrNoIngressRulesCLI) { if errors.Is(err, ErrNoIngressRulesCLI) {
// Only log a warning if the tunnel is not a remotely managed tunnel and the config // If no token is provided, the probability of NOT being a remotely managed tunnel is higher.
// will be loaded after connecting. // So, we should warn the user that no ingress rules were found, because remote configuration will most likely not exist.
if !c.IsSet("token") { if !c.IsSet("token") {
log.Warn().Msgf(ErrNoIngressRulesCLI.Error()) log.Warn().Msgf(ErrNoIngressRulesCLI.Error())
} }
return newDefaultOrigin(c, log), nil return newDefaultOrigin(c, log), nil
} }
if err != nil { if err != nil {
return Ingress{}, err return Ingress{}, err
} }
return ingressRules, nil return ingressRules, nil
} }
@ -148,14 +150,10 @@ func parseCLIIngress(c *cli.Context, allowURLFromArgs bool) (Ingress, error) {
// newDefaultOrigin always returns a 503 response code to help indicate that there are no ingress // newDefaultOrigin always returns a 503 response code to help indicate that there are no ingress
// rules setup, but the tunnel is reachable. // rules setup, but the tunnel is reachable.
func newDefaultOrigin(c *cli.Context, log *zerolog.Logger) Ingress { func newDefaultOrigin(c *cli.Context, log *zerolog.Logger) Ingress {
noRulesService := newDefaultStatusCode(log) defaultRule := GetDefaultIngressRules(log)
defaults := originRequestFromSingleRule(c) defaults := originRequestFromSingleRule(c)
ingress := Ingress{ ingress := Ingress{
Rules: []Rule{ Rules: defaultRule,
{
Service: &noRulesService,
},
},
Defaults: defaults, Defaults: defaults,
} }
return ingress return ingress
@ -219,6 +217,17 @@ func (ing Ingress) CatchAll() *Rule {
return &ing.Rules[len(ing.Rules)-1] return &ing.Rules[len(ing.Rules)-1]
} }
// GetDefaultIngressRules returns the default ingress rule set: a single
// catch-all rule whose service responds with a 503 status code to every
// incoming request, signalling that the tunnel is reachable but has no
// user-configured ingress rules.
func GetDefaultIngressRules(log *zerolog.Logger) []Rule {
noRulesService := newDefaultStatusCode(log)
return []Rule{
{
Service: &noRulesService,
},
}
}
func validateAccessConfiguration(cfg *config.AccessConfig) error { func validateAccessConfiguration(cfg *config.AccessConfig) error {
if !cfg.Required { if !cfg.Required {
return nil return nil

View File

@ -28,8 +28,8 @@ type Orchestrator struct {
lock sync.RWMutex lock sync.RWMutex
// Underlying value is proxy.Proxy, can be read without the lock, but still needs the lock to update // Underlying value is proxy.Proxy, can be read without the lock, but still needs the lock to update
proxy atomic.Value proxy atomic.Value
// Set of local ingress rules defined at cloudflared startup (separate from user-defined ingress rules) // Set of internal ingress rules defined at cloudflared startup (separate from user-defined ingress rules)
localRules []ingress.Rule internalRules []ingress.Rule
warpRoutingEnabled atomic.Bool warpRoutingEnabled atomic.Bool
config *Config config *Config
tags []tunnelpogs.Tag tags []tunnelpogs.Tag
@ -41,13 +41,13 @@ type Orchestrator struct {
proxyShutdownC chan<- struct{} proxyShutdownC chan<- struct{}
} }
func NewOrchestrator(ctx context.Context, config *Config, tags []tunnelpogs.Tag, localRules []ingress.Rule, log *zerolog.Logger) (*Orchestrator, error) { func NewOrchestrator(ctx context.Context, config *Config, tags []tunnelpogs.Tag, internalRules []ingress.Rule, log *zerolog.Logger) (*Orchestrator, error) {
o := &Orchestrator{ o := &Orchestrator{
// Lowest possible version, any remote configuration will have version higher than this // Lowest possible version, any remote configuration will have version higher than this
// Starting at -1 allows a configuration migration (local to remote) to override the current configuration as it // Starting at -1 allows a configuration migration (local to remote) to override the current configuration as it
// will start at version 0. // will start at version 0.
currentVersion: -1, currentVersion: -1,
localRules: localRules, internalRules: internalRules,
config: config, config: config,
tags: tags, tags: tags,
log: log, log: log,
@ -116,8 +116,13 @@ func (o *Orchestrator) updateIngress(ingressRules ingress.Ingress, warpRouting i
default: default:
} }
// Assign the local ingress rules to the parsed ingress // Assign the internal ingress rules to the parsed ingress
ingressRules.LocalRules = o.localRules ingressRules.InternalRules = o.internalRules
// Check if ingress rules are empty, and add the default route if so.
if ingressRules.IsEmpty() {
ingressRules.Rules = ingress.GetDefaultIngressRules(o.log)
}
// Start new proxy before closing the ones from last version. // Start new proxy before closing the ones from last version.
// The upside is we don't need to restart proxy from last version, which can fail // The upside is we don't need to restart proxy from last version, which can fail

View File

@ -96,10 +96,10 @@ func TestUpdateConfiguration(t *testing.T) {
updateWithValidation(t, orchestrator, 2, configJSONV2) updateWithValidation(t, orchestrator, 2, configJSONV2)
configV2 := orchestrator.config configV2 := orchestrator.config
// Validate local ingress rules // Validate internal ingress rules
require.Equal(t, "management.argotunnel.com", configV2.Ingress.LocalRules[0].Hostname) require.Equal(t, "management.argotunnel.com", configV2.Ingress.InternalRules[0].Hostname)
require.True(t, configV2.Ingress.LocalRules[0].Matches("management.argotunnel.com", "/ping")) require.True(t, configV2.Ingress.InternalRules[0].Matches("management.argotunnel.com", "/ping"))
require.Equal(t, "management", configV2.Ingress.LocalRules[0].Service.String()) require.Equal(t, "management", configV2.Ingress.InternalRules[0].Service.String())
// Validate ingress rule 0 // Validate ingress rule 0
require.Equal(t, "jira.tunnel.org", configV2.Ingress.Rules[0].Hostname) require.Equal(t, "jira.tunnel.org", configV2.Ingress.Rules[0].Hostname)
require.True(t, configV2.Ingress.Rules[0].Matches("jira.tunnel.org", "/login")) require.True(t, configV2.Ingress.Rules[0].Matches("jira.tunnel.org", "/login"))
@ -210,6 +210,28 @@ func TestUpdateConfiguration_FromMigration(t *testing.T) {
require.Len(t, orchestrator.config.Ingress.Rules, 1) require.Len(t, orchestrator.config.Ingress.Rules, 1)
} }
// TestUpdateConfiguration_WithoutIngressRule validates that the default
// (503 catch-all) ingress rule is installed when the remote configuration
// provides no ingress rules at all.
func TestUpdateConfiguration_WithoutIngressRule(t *testing.T) {
	initConfig := &Config{
		Ingress: &ingress.Ingress{},
	}
	orchestrator, err := NewOrchestrator(context.Background(), initConfig, testTags, []ingress.Rule{}, &testLogger)
	require.NoError(t, err)
	initOriginProxy, err := orchestrator.GetOriginProxy()
	require.NoError(t, err)
	require.Implements(t, (*connection.OriginProxy)(nil), initOriginProxy)
	// An empty RemoteConfigJSON marshals to the payload shape that the
	// orchestrator unmarshals into a RemoteConfig with zero ingress rules.
	emptyConfig := &ingress.RemoteConfigJSON{}
	configBytes, err := json.Marshal(emptyConfig)
	require.NoError(t, err, "The RemoteConfigJSON shouldn't fail while being marshalled")
	updateWithValidation(t, orchestrator, 0, configBytes)
	// The default rule must have been injected in place of the empty rule set.
	require.Len(t, orchestrator.config.Ingress.Rules, 1)
}
// TestConcurrentUpdateAndRead makes sure orchestrator can receive updates and return origin proxy concurrently // TestConcurrentUpdateAndRead makes sure orchestrator can receive updates and return origin proxy concurrently
func TestConcurrentUpdateAndRead(t *testing.T) { func TestConcurrentUpdateAndRead(t *testing.T) {
const ( const (