Skip to content

Commit c0959cf

Browse files
committed
Add structured logging
1 parent b6dbca0 commit c0959cf

16 files changed

Lines changed: 465 additions & 59 deletions

File tree

cmd/processor/main.go

Lines changed: 25 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -22,12 +22,16 @@ import (
2222
"flag"
2323
"os"
2424
"os/signal"
25+
"strings"
2526
"syscall"
2627

28+
"go.uber.org/zap"
29+
"go.uber.org/zap/zapcore"
2730
ctrl "sigs.k8s.io/controller-runtime"
2831
zaprctrl "sigs.k8s.io/controller-runtime/pkg/log/zap"
2932

3033
dataflowv1 "github.com/dataflow-operator/dataflow/api/v1"
34+
"github.com/dataflow-operator/dataflow/internal/logkeys"
3135
"github.com/dataflow-operator/dataflow/internal/processor"
3236
)
3337

@@ -44,9 +48,14 @@ func main() {
4448
opts.BindFlags(flag.CommandLine)
4549
flag.Parse()
4650

47-
// Настройка логгера
48-
ctrl.SetLogger(zaprctrl.New(zaprctrl.UseFlagOptions(&opts)))
49-
logger := ctrl.Log.WithName("processor").WithValues("namespace", namespace, "name", name)
51+
// Уровень логирования: переменная окружения LOG_LEVEL (debug, info, warn, error) или флаги
52+
levelEnabler := processorLevelFromEnv(os.Getenv("LOG_LEVEL"), opts.Level)
53+
zapOpts := []zaprctrl.Opts{zaprctrl.UseFlagOptions(&opts)}
54+
if levelEnabler != nil {
55+
zapOpts = append(zapOpts, zaprctrl.Level(levelEnabler))
56+
}
57+
ctrl.SetLogger(zaprctrl.New(zapOpts...))
58+
logger := ctrl.Log.WithName("processor").WithValues(logkeys.DataflowNamespace, namespace, logkeys.DataflowName, name)
5059

5160
// Читаем spec из файла
5261
specData, err := os.ReadFile(specPath)
@@ -102,3 +111,16 @@ func main() {
102111

103112
logger.Info("Processor stopped successfully")
104113
}
114+
115+
// processorLevelFromEnv returns zap LevelEnabler from LOG_LEVEL env if set, otherwise optsLevel.
116+
func processorLevelFromEnv(envLevel string, optsLevel zapcore.LevelEnabler) zapcore.LevelEnabler {
117+
s := strings.TrimSpace(strings.ToLower(envLevel))
118+
if s == "" {
119+
return optsLevel
120+
}
121+
var l zapcore.Level
122+
if err := l.UnmarshalText([]byte(s)); err != nil {
123+
return optsLevel
124+
}
125+
return zap.NewAtomicLevelAt(l)
126+
}

helm/dataflow-operator/templates/deployment.yaml

Lines changed: 2 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -50,6 +50,8 @@ spec:
5050
value: {{ include "dataflow-operator.fullname" . | quote }}
5151
- name: OPERATOR_NAMESPACE
5252
value: {{ .Release.Namespace | quote }}
53+
- name: LOG_LEVEL
54+
value: {{ .Values.logLevel | default "info" | quote }}
5355
args:
5456
- --metrics-bind-address=:{{ .Values.metrics.port }}
5557
- --health-probe-bind-address=:{{ .Values.health.probePort }}

helm/dataflow-operator/values.yaml

Lines changed: 3 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -69,6 +69,9 @@ metrics:
6969
# The address the metric endpoint binds to
7070
port: 9090
7171

72+
# Log level for the operator (sets env LOG_LEVEL: debug, info, warn, error)
73+
logLevel: "info"
74+
7275
# Health probe configuration
7376
health:
7477
# The address the probe endpoint binds to

internal/connectors/kafka.go

Lines changed: 13 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -36,6 +36,7 @@ import (
3636

3737
"github.com/IBM/sarama"
3838
v1 "github.com/dataflow-operator/dataflow/api/v1"
39+
"github.com/dataflow-operator/dataflow/internal/logkeys"
3940
"github.com/dataflow-operator/dataflow/internal/metrics"
4041
"github.com/dataflow-operator/dataflow/internal/retry"
4142
"github.com/dataflow-operator/dataflow/internal/types"
@@ -237,6 +238,7 @@ func (k *KafkaSourceConnector) Connect(ctx context.Context) error {
237238
}(), err)
238239
}
239240
k.consumer = consumer
241+
k.logger.Info("Successfully connected to Kafka", "brokers", k.config.Brokers, "topic", k.config.Topic, "group", consumerGroup)
240242

241243
// Record connection status
242244
if k.namespace != "" && k.name != "" {
@@ -607,6 +609,7 @@ func (k *KafkaSourceConnector) Close() error {
607609
return nil
608610
}
609611

612+
k.logger.Info("Closing Kafka source connection", "brokers", k.config.Brokers, "topic", k.config.Topic)
610613
k.closed = true
611614
if k.consumer != nil {
612615
// Record connection status
@@ -668,6 +671,7 @@ func (h *kafkaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSes
668671
msgData, err = h.connector.deserializeAvro(session.Context(), message.Value)
669672
if err != nil {
670673
h.connector.logger.Error(err, "Failed to deserialize Avro message",
674+
logkeys.MessageID, fmt.Sprintf("%d/%d", message.Partition, message.Offset),
671675
"topic", message.Topic,
672676
"partition", message.Partition,
673677
"offset", message.Offset)
@@ -685,11 +689,12 @@ func (h *kafkaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSes
685689
msg.Metadata["partition"] = message.Partition
686690
msg.Metadata["offset"] = message.Offset
687691
msg.Metadata["key"] = string(message.Key)
692+
// Commit offset only after the message is successfully written to the sink (called by sink connectors)
693+
msg.Ack = func() { session.MarkMessage(message, "") }
688694

689695
select {
690696
case h.msgChan <- msg:
691-
session.MarkMessage(message, "")
692-
// Record metrics
697+
// Record metrics (offset will be committed by sink after successful write)
693698
if h.connector.namespace != "" && h.connector.name != "" {
694699
metrics.RecordConnectorMessageRead(h.connector.namespace, h.connector.name, "kafka", "source")
695700
}
@@ -887,6 +892,7 @@ func (k *KafkaSinkConnector) Connect(ctx context.Context) error {
887892
}(), err)
888893
}
889894
k.producer = producer
895+
k.logger.Info("Successfully connected to Kafka", "brokers", k.config.Brokers, "topic", k.config.Topic)
890896

891897
// Record connection status
892898
if k.namespace != "" && k.name != "" {
@@ -981,6 +987,10 @@ func (k *KafkaSinkConnector) Write(ctx context.Context, messages <-chan *types.M
981987

982988
msg.Metadata["partition"] = partition
983989
msg.Metadata["offset"] = offset
990+
991+
if msg.Ack != nil {
992+
msg.Ack()
993+
}
984994
}
985995
}
986996
}
@@ -994,6 +1004,7 @@ func (k *KafkaSinkConnector) Close() error {
9941004
return nil
9951005
}
9961006

1007+
k.logger.Info("Closing Kafka sink connection", "brokers", k.config.Brokers, "topic", k.config.Topic)
9971008
k.closed = true
9981009
if k.producer != nil {
9991010
// Record connection status

0 commit comments

Comments (0)