Nirav Chotai 2025-05-13 12:30:44 -04:00 committed by GitHub
commit 78eb1f8c0a
3 changed files with 29 additions and 7 deletions

View File

@@ -50,6 +50,7 @@ const (
noDiagNetworkFlagName = "no-diag-network"
diagContainerIDFlagName = "diag-container-id"
diagPodFlagName = "diag-pod-id"
diagNamespaceFlagName = "diag-namespace-id"
LogFieldTunnelID = "tunnelID"
)
@@ -211,6 +212,11 @@ var (
Usage: "Kubernetes POD to collect logs from",
Value: "",
}
diagNamespaceFlag = &cli.StringFlag{
Name: diagNamespaceFlagName,
Usage: "Kubernetes Namespace to collect logs from",
Value: "",
}
noDiagLogsFlag = &cli.BoolFlag{
Name: noDiagLogsFlagName,
Usage: "Log collection will not be performed",
@@ -1099,6 +1105,7 @@ func diagCommand(ctx *cli.Context) error {
Address: sctx.c.String(flags.Metrics),
ContainerID: sctx.c.String(diagContainerIDFlagName),
PodID: sctx.c.String(diagPodFlagName),
NamespaceID: sctx.c.String(diagNamespaceFlagName),
Toggles: diagnostic.Toggles{
NoDiagLogs: sctx.c.Bool(noDiagLogsFlagName),
NoDiagMetrics: sctx.c.Bool(noDiagMetricsFlagName),
@@ -1130,7 +1137,7 @@ func diagCommand(ctx *cli.Context) error {
}
if errors.Is(err, diagnostic.ErrLogConfigurationIsInvalid) {
log.Info().Msg("Couldn't extract logs from the instance. If the instance is running in a containerized environment use the option --diag-container-id or --diag-pod-id. If there is no logging configuration use --no-diag-logs.")
log.Info().Msg("Couldn't extract logs from the instance. If the instance is running in a containerized environment use the option --diag-container-id, --diag-pod-id or --diag-namespace-id. If there is no logging configuration use --no-diag-logs.")
}
if err != nil {

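Taken together, this first file wires the new --diag-namespace-id flag into the diag command. A hypothetical invocation, assuming the command is reached via cloudflared tunnel diag (the pod and namespace names are placeholders, not part of this commit):

cloudflared tunnel diag --diag-pod-id <pod-name> --diag-namespace-id <namespace>

Without --diag-namespace-id, the collector falls back to the "default" namespace, as the constructor change further down shows.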
View File

@@ -92,17 +92,18 @@ type Options struct {
Address string
ContainerID string
PodID string
NamespaceID string
Toggles Toggles
}
func collectLogs(
ctx context.Context,
client HTTPClient,
diagContainer, diagPod string,
diagContainer, diagPod, diagNamespace string,
) (string, error) {
var collector LogCollector
if diagPod != "" {
collector = NewKubernetesLogCollector(diagContainer, diagPod)
collector = NewKubernetesLogCollector(diagContainer, diagPod, diagNamespace)
} else if diagContainer != "" {
collector = NewDockerLogCollector(diagContainer)
} else {
@@ -370,6 +371,7 @@ func createJobs(
tunnel *TunnelState,
diagContainer string,
diagPod string,
diagNamespace string,
noDiagSystem bool,
noDiagRuntime bool,
noDiagMetrics bool,
@@ -406,7 +408,7 @@ func createJobs(
{
jobName: logInformationJobName,
fn: func(ctx context.Context) (string, error) {
return collectLogs(ctx, client, diagContainer, diagPod)
return collectLogs(ctx, client, diagContainer, diagPod, diagNamespace)
},
bypass: noDiagLogs,
},
@@ -524,6 +526,7 @@ func RunDiagnostic(
tunnel,
options.ContainerID,
options.PodID,
options.NamespaceID,
options.Toggles.NoDiagSystem,
options.Toggles.NoDiagRuntime,
options.Toggles.NoDiagMetrics,

View File

@@ -12,12 +12,20 @@ import (
type KubernetesLogCollector struct {
containerID string // This member identifies the container by identifier or name
pod string // This member identifies the pod where the container is deployed
namespace string // This member identifies the namespace where the pod is deployed
}
func NewKubernetesLogCollector(containerID, pod string) *KubernetesLogCollector {
func NewKubernetesLogCollector(containerID, pod string, namespace ...string) *KubernetesLogCollector {
ns := "default"
if len(namespace) > 0 && namespace[0] != "" {
ns = namespace[0]
}
return &KubernetesLogCollector{
containerID,
pod,
containerID: containerID,
pod: pod,
namespace: ns,
}
}
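A brief sketch of how the updated constructor behaves; these call sites are illustrative only and not part of this commit:

// Existing two-argument callers keep compiling and fall back to the "default" namespace.
_ = NewKubernetesLogCollector("cloudflared", "my-pod")
// The diag command now passes the namespace explicitly as a third argument.
_ = NewKubernetesLogCollector("cloudflared", "my-pod", "my-namespace")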
@@ -38,6 +46,8 @@ func (collector *KubernetesLogCollector) Collect(ctx context.Context) (*LogInfor
ctx,
"kubectl",
"logs",
"-n",
collector.namespace,
collector.pod,
"--since-time",
since,
@@ -51,6 +61,8 @@ func (collector *KubernetesLogCollector) Collect(ctx context.Context) (*LogInfor
ctx,
"kubectl",
"logs",
"-n",
collector.namespace,
collector.pod,
"--since-time",
since,
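With the two added arguments, both kubectl invocations above are scoped to the collector's namespace. Roughly, the command they build looks like the line below; the remaining arguments are truncated in this view and the placeholder values are illustrative:

kubectl logs -n <namespace> <pod> --since-time <timestamp> ...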