Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix sarama consumer deadlock #2587

Merged
merged 3 commits into from
Nov 2, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 18 additions & 23 deletions cmd/ingester/app/consumer/consumer.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,11 @@ type Consumer struct {
partitionMapLock sync.Mutex
partitionsHeld int64
partitionsHeldGauge metrics.Gauge

doneWg sync.WaitGroup
}

type consumerState struct {
wg sync.WaitGroup
partitionConsumer sc.PartitionConsumer
}

Expand All @@ -78,17 +79,11 @@ func (c *Consumer) Start() {
c.logger.Info("Starting main loop")
for pc := range c.internalConsumer.Partitions() {
c.partitionMapLock.Lock()
if p, ok := c.partitionIDToState[pc.Partition()]; ok {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are you sure this is safe to remove? From the comment it seems like a case where a partition gets re-assigned to the same service instance, so there's potentially another partition consumer for this partition ID already running (or, more likely, shutting down).

@vprithvi do you have any more color to add to this condition?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We noticed the case that you described a couple of times, but weren't able to reproduce it in tests.
Since then, many things have changed with both Kafka and Sarama. I think we can remove this now in the interest of simplifying and revisit if see problems.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How did it manifest when it happened? Committed offsets going backwards?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

IIUC, this happens when new messages arrive on the partition (because the Messages() channel isn't closed yet) after the partition consumer was closed (via the c.closePartition(p.partitionConsumer) call), causing a new partition consumer to be created.

This PR proposes to solve this by calling c.internalConsumer.Close() first within Close() which closes the Messages() channel, preventing new messages from arriving, and then closes the Partitions() channel, preventing new PartitionConsumers from being created.

// This is a guard against simultaneously draining messages
// from the last time the partition was assigned and
// processing new messages for the same partition, which may lead
// to the cleanup process not completing
p.wg.Wait()
}
c.partitionIDToState[pc.Partition()] = &consumerState{partitionConsumer: pc}
c.partitionIDToState[pc.Partition()].wg.Add(2)
c.partitionMapLock.Unlock()
c.partitionMetrics(pc.Partition()).startCounter.Inc(1)

c.doneWg.Add(2)
go c.handleMessages(pc)
go c.handleErrors(pc.Partition(), pc.Errors())
}
Expand All @@ -97,31 +92,33 @@ func (c *Consumer) Start() {

// Close closes the Consumer and underlying sarama consumer
func (c *Consumer) Close() error {
c.partitionMapLock.Lock()
for _, p := range c.partitionIDToState {
c.closePartition(p.partitionConsumer)
p.wg.Wait()
}
c.partitionMapLock.Unlock()
c.deadlockDetector.close()
// Close the internal consumer, which will close each partition consumers' message and error channels.
c.logger.Info("Closing parent consumer")
return c.internalConsumer.Close()
err := c.internalConsumer.Close()

c.logger.Debug("Closing deadlock detector")
c.deadlockDetector.close()

c.logger.Debug("Waiting for messages and errors to be handled")
c.doneWg.Wait()

return err
}

// handleMessages handles incoming Kafka messages on a channel
func (c *Consumer) handleMessages(pc sc.PartitionConsumer) {
c.logger.Info("Starting message handler", zap.Int32("partition", pc.Partition()))
c.partitionMapLock.Lock()
c.partitionsHeld++
c.partitionsHeldGauge.Update(c.partitionsHeld)
wg := &c.partitionIDToState[pc.Partition()].wg
c.partitionMapLock.Unlock()
defer func() {
c.closePartition(pc)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this closePartition still necessary provided internalConsumer.Close() closes all partitions?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yup, I think you're right, @vprithvi. 👍

However, closePartition() also increments a metrics counter, which is used in tests to detect whether the partition is closed; neither the Consumer nor the PartitionConsumer interface appears to offer an "OnClosed" callback or channel to signal successful closure of a partition.

We could instead replace c.closePartition(pc) with c.partitionMetrics(pc.Partition()).closeCounter.Inc(1) to bump up the metric and remove the closePartition() function, if that sounds reasonable to you?

wg.Done()
c.partitionMapLock.Lock()
c.partitionsHeld--
c.partitionsHeldGauge.Update(c.partitionsHeld)
c.partitionMapLock.Unlock()
c.doneWg.Done()
}()

msgMetrics := c.newMsgMetrics(pc.Partition())
Expand Down Expand Up @@ -165,12 +162,10 @@ func (c *Consumer) closePartition(partitionConsumer sc.PartitionConsumer) {
c.logger.Info("Closed partition consumer", zap.Int32("partition", partitionConsumer.Partition()))
}

// handleErrors handles incoming Kafka consumer errors on a channel
func (c *Consumer) handleErrors(partition int32, errChan <-chan *sarama.ConsumerError) {
c.logger.Info("Starting error handler", zap.Int32("partition", partition))
c.partitionMapLock.Lock()
wg := &c.partitionIDToState[partition].wg
c.partitionMapLock.Unlock()
defer wg.Done()
defer c.doneWg.Done()

errMetrics := c.newErrMetrics(partition)
for err := range errChan {
yurishkuro marked this conversation as resolved.
Show resolved Hide resolved
Expand Down
31 changes: 17 additions & 14 deletions cmd/ingester/app/consumer/consumer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ func (s partitionConsumerWrapper) Topic() string {
return s.topic
}

func newSaramaClusterConsumer(saramaPartitionConsumer sarama.PartitionConsumer) *kmocks.Consumer {
func newSaramaClusterConsumer(saramaPartitionConsumer sarama.PartitionConsumer, mc *smocks.PartitionConsumer) *kmocks.Consumer {
pcha := make(chan cluster.PartitionConsumer, 1)
pcha <- &partitionConsumerWrapper{
topic: topic,
Expand All @@ -77,27 +77,26 @@ func newSaramaClusterConsumer(saramaPartitionConsumer sarama.PartitionConsumer)
}
saramaClusterConsumer := &kmocks.Consumer{}
saramaClusterConsumer.On("Partitions").Return((<-chan cluster.PartitionConsumer)(pcha))
saramaClusterConsumer.On("Close").Return(nil)
saramaClusterConsumer.On("Close").Return(nil).Run(func(args mock.Arguments) {
mc.Close()
})
saramaClusterConsumer.On("MarkPartitionOffset", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
return saramaClusterConsumer
}

func newConsumer(
t *testing.T,
metricsFactory metrics.Factory,
topic string,
processor processor.SpanProcessor,
consumer consumer.Consumer) *Consumer {

logger, _ := zap.NewDevelopment()
return &Consumer{
metricsFactory: metricsFactory,
logger: logger,
internalConsumer: consumer,
partitionIDToState: make(map[int32]*consumerState),
partitionsHeldGauge: partitionsHeldGauge(metricsFactory),
deadlockDetector: newDeadlockDetector(metricsFactory, logger, time.Second),

processorFactory: ProcessorFactory{
consumerParams := Params{
MetricsFactory: metricsFactory,
Logger: logger,
InternalConsumer: consumer,
ProcessorFactory: ProcessorFactory{
topic: topic,
consumer: consumer,
metricsFactory: metricsFactory,
Expand All @@ -106,6 +105,10 @@ func newConsumer(
parallelism: 1,
},
}

c, err := New(consumerParams)
require.NoError(t, err)
return c
}

func TestSaramaConsumerWrapper_MarkPartitionOffset(t *testing.T) {
Expand Down Expand Up @@ -136,7 +139,7 @@ func TestSaramaConsumerWrapper_start_Messages(t *testing.T) {
saramaPartitionConsumer, e := saramaConsumer.ConsumePartition(topic, partition, msgOffset)
require.NoError(t, e)

undertest := newConsumer(localFactory, topic, mp, newSaramaClusterConsumer(saramaPartitionConsumer))
undertest := newConsumer(t, localFactory, topic, mp, newSaramaClusterConsumer(saramaPartitionConsumer, mc))

undertest.partitionIDToState = map[int32]*consumerState{
partition: {
Expand Down Expand Up @@ -202,7 +205,7 @@ func TestSaramaConsumerWrapper_start_Errors(t *testing.T) {
saramaPartitionConsumer, e := saramaConsumer.ConsumePartition(topic, partition, msgOffset)
require.NoError(t, e)

undertest := newConsumer(localFactory, topic, &pmocks.SpanProcessor{}, newSaramaClusterConsumer(saramaPartitionConsumer))
undertest := newConsumer(t, localFactory, topic, &pmocks.SpanProcessor{}, newSaramaClusterConsumer(saramaPartitionConsumer, mc))

undertest.Start()
mc.YieldError(errors.New("Daisy, Daisy"))
Expand Down Expand Up @@ -238,7 +241,7 @@ func TestHandleClosePartition(t *testing.T) {
saramaPartitionConsumer, e := saramaConsumer.ConsumePartition(topic, partition, msgOffset)
require.NoError(t, e)

undertest := newConsumer(metricsFactory, topic, mp, newSaramaClusterConsumer(saramaPartitionConsumer))
undertest := newConsumer(t, metricsFactory, topic, mp, newSaramaClusterConsumer(saramaPartitionConsumer, mc))
undertest.deadlockDetector = newDeadlockDetector(metricsFactory, undertest.logger, 200*time.Millisecond)
undertest.Start()
defer undertest.Close()
Expand Down