@@ -48,7 +48,20 @@ type kafkaConsumer struct {
48
48
logger * zap.Logger
49
49
}
50
50
51
// kafkaLogsConsumer uses sarama to consume and handle messages from kafka.
type kafkaLogsConsumer struct {
	name              string               // receiver instance name, used for metrics tagging
	consumerGroup     sarama.ConsumerGroup // sarama consumer group the consume loop reads from
	nextConsumer      consumer.Logs        // downstream consumer logs are forwarded to
	topics            []string             // kafka topics to subscribe to
	cancelConsumeLoop context.CancelFunc   // cancels the background consume loop; assigned in Start
	unmarshaller      LogsUnmarshaller     // decodes kafka message payloads into logs

	logger *zap.Logger
}
51
63
// Compile-time checks that both consumers implement component.Receiver.
var _ component.Receiver = (*kafkaConsumer)(nil)
var _ component.Receiver = (*kafkaLogsConsumer)(nil)
66
func newReceiver (config Config , params component.ReceiverCreateParams , unmarshalers map [string ]Unmarshaller , nextConsumer consumer.Traces ) (* kafkaConsumer , error ) {
54
67
unmarshaller := unmarshalers [config .Encoding ]
@@ -121,6 +134,77 @@ func (c *kafkaConsumer) Shutdown(context.Context) error {
121
134
return c .consumerGroup .Close ()
122
135
}
123
136
137
+ func newLogsReceiver (config Config , params component.ReceiverCreateParams , unmarshalers map [string ]LogsUnmarshaller , nextConsumer consumer.Logs ) (* kafkaLogsConsumer , error ) {
138
+ unmarshaller := unmarshalers [config .Encoding ]
139
+ if unmarshaller == nil {
140
+ return nil , errUnrecognizedEncoding
141
+ }
142
+
143
+ c := sarama .NewConfig ()
144
+ c .ClientID = config .ClientID
145
+ c .Metadata .Full = config .Metadata .Full
146
+ c .Metadata .Retry .Max = config .Metadata .Retry .Max
147
+ c .Metadata .Retry .Backoff = config .Metadata .Retry .Backoff
148
+ if config .ProtocolVersion != "" {
149
+ version , err := sarama .ParseKafkaVersion (config .ProtocolVersion )
150
+ if err != nil {
151
+ return nil , err
152
+ }
153
+ c .Version = version
154
+ }
155
+ if err := kafkaexporter .ConfigureAuthentication (config .Authentication , c ); err != nil {
156
+ return nil , err
157
+ }
158
+ client , err := sarama .NewConsumerGroup (config .Brokers , config .GroupID , c )
159
+ if err != nil {
160
+ return nil , err
161
+ }
162
+ return & kafkaLogsConsumer {
163
+ name : config .Name (),
164
+ consumerGroup : client ,
165
+ topics : []string {config .Topic },
166
+ nextConsumer : nextConsumer ,
167
+ unmarshaller : unmarshaller ,
168
+ logger : params .Logger ,
169
+ }, nil
170
+ }
171
+
172
+ func (c * kafkaLogsConsumer ) Start (context.Context , component.Host ) error {
173
+ ctx , cancel := context .WithCancel (context .Background ())
174
+ c .cancelConsumeLoop = cancel
175
+ logsConsumerGroup := & logsConsumerGroupHandler {
176
+ name : c .name ,
177
+ logger : c .logger ,
178
+ unmarshaller : c .unmarshaller ,
179
+ nextConsumer : c .nextConsumer ,
180
+ ready : make (chan bool ),
181
+ }
182
+ go c .consumeLoop (ctx , logsConsumerGroup )
183
+ <- logsConsumerGroup .ready
184
+ return nil
185
+ }
186
+
187
+ func (c * kafkaLogsConsumer ) consumeLoop (ctx context.Context , handler sarama.ConsumerGroupHandler ) error {
188
+ for {
189
+ // `Consume` should be called inside an infinite loop, when a
190
+ // server-side rebalance happens, the consumer session will need to be
191
+ // recreated to get the new claims
192
+ if err := c .consumerGroup .Consume (ctx , c .topics , handler ); err != nil {
193
+ c .logger .Error ("Error from consumer" , zap .Error (err ))
194
+ }
195
+ // check if context was cancelled, signaling that the consumer should stop
196
+ if ctx .Err () != nil {
197
+ c .logger .Info ("Consumer stopped" , zap .Error (ctx .Err ()))
198
+ return ctx .Err ()
199
+ }
200
+ }
201
+ }
202
+
203
+ func (c * kafkaLogsConsumer ) Shutdown (context.Context ) error {
204
+ c .cancelConsumeLoop ()
205
+ return c .consumerGroup .Close ()
206
+ }
207
+
124
208
type consumerGroupHandler struct {
125
209
name string
126
210
unmarshaller Unmarshaller
@@ -131,7 +215,18 @@ type consumerGroupHandler struct {
131
215
logger * zap.Logger
132
216
}
133
217
218
// logsConsumerGroupHandler implements sarama.ConsumerGroupHandler for the
// logs receiver: it decodes each kafka message and forwards it downstream.
type logsConsumerGroupHandler struct {
	name         string           // receiver instance name for metrics tags
	unmarshaller LogsUnmarshaller // decodes message payloads into logs
	nextConsumer consumer.Logs    // downstream logs consumer
	ready        chan bool        // closed once the first session Setup runs
	readyCloser  sync.Once        // ensures ready is closed exactly once across rebalances

	logger *zap.Logger
}
134
228
// Compile-time checks that both handlers satisfy sarama.ConsumerGroupHandler.
var _ sarama.ConsumerGroupHandler = (*consumerGroupHandler)(nil)
var _ sarama.ConsumerGroupHandler = (*logsConsumerGroupHandler)(nil)
231
func (c * consumerGroupHandler ) Setup (session sarama.ConsumerGroupSession ) error {
137
232
c .readyCloser .Do (func () {
@@ -180,3 +275,51 @@ func (c *consumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
180
275
}
181
276
return nil
182
277
}
278
+
279
+ func (c * logsConsumerGroupHandler ) Setup (session sarama.ConsumerGroupSession ) error {
280
+ c .readyCloser .Do (func () {
281
+ close (c .ready )
282
+ })
283
+ statsTags := []tag.Mutator {tag .Insert (tagInstanceName , c .name )}
284
+ _ = stats .RecordWithTags (session .Context (), statsTags , statPartitionStart .M (1 ))
285
+ return nil
286
+ }
287
+
288
+ func (c * logsConsumerGroupHandler ) Cleanup (session sarama.ConsumerGroupSession ) error {
289
+ statsTags := []tag.Mutator {tag .Insert (tagInstanceName , c .name )}
290
+ _ = stats .RecordWithTags (session .Context (), statsTags , statPartitionClose .M (1 ))
291
+ return nil
292
+ }
293
+
294
// ConsumeClaim processes messages from a single partition claim: each
// message is decoded with the configured unmarshaller and handed to the
// next consumer. Returning a non-nil error ends the whole session, after
// which consumeLoop re-joins the group.
func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	c.logger.Info("Starting consumer group", zap.Int32("partition", claim.Partition()))
	for message := range claim.Messages() {
		c.logger.Debug("Kafka message claimed",
			zap.String("value", string(message.Value)),
			zap.Time("timestamp", message.Timestamp),
			zap.String("topic", message.Topic))
		// NOTE(review): the offset is marked before the payload is decoded
		// and delivered, so a message that fails below is still committed —
		// confirm this at-most-once delivery behavior is intended.
		session.MarkMessage(message, "")

		ctx := obsreport.ReceiverContext(session.Context(), c.name, transport)
		// NOTE(review): reuses the trace-data obsreport operation for logs
		// (see TODO below) — presumably a logs-specific op was unavailable.
		ctx = obsreport.StartTraceDataReceiveOp(ctx, c.name, transport)
		statsTags := []tag.Mutator{tag.Insert(tagInstanceName, c.name)}
		_ = stats.RecordWithTags(ctx, statsTags,
			statMessageCount.M(1),
			statMessageOffset.M(message.Offset),
			statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1))

		logs, err := c.unmarshaller.Unmarshal(message.Value)
		if err != nil {
			// An undecodable payload aborts the session; the error
			// propagates up through consumeLoop.
			c.logger.Error("failed to unmarshall message", zap.Error(err))
			return err
		}

		err = c.nextConsumer.ConsumeLogs(session.Context(), logs)
		// TODO: end a logs-specific receive op here once obsreport has one.
		obsreport.EndTraceDataReceiveOp(ctx, c.unmarshaller.Encoding(), logs.LogRecordCount(), err)
		if err != nil {
			return err
		}
	}
	return nil
}
0 commit comments