 require "digest"
 require "rspec/wait"
 
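+# Runs the Kafka input in a background thread so the example thread can watch the queue.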
+def thread_it(kafka_input, queue)
+  Thread.new do
+    begin
+      kafka_input.run(queue)
+    end
+  end
+end
+
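+# Starts the input, waits up to timeout_seconds for expected_num_events to arrive,
+# yields the queue to the example, and always stops the consumer thread afterwards.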
+def run_with_kafka(&block)
+  queue = Queue.new
+  t = thread_it(kafka_input, queue)
+  begin
+    wait(timeout_seconds).for { queue.length }.to eq(expected_num_events)
+    yield(queue)
+  ensure
+    t.kill
+    t.join(30_000)
+  end
+end
+
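+# Shared assertion reused by the per-topic contexts below.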
+shared_examples 'consumes all expected messages' do
+  it 'should consume all expected messages' do
+    run_with_kafka do |queue|
+      expect(queue.length).to eq(expected_num_events)
+    end
+  end
+end
+
 # Please run kafka_test_setup.sh prior to executing this integration test.
 describe "inputs/kafka", :integration => true do
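+  # Stub the plugin's execution context so it reports a known pipeline_id;
+  # the multi-consumer example below asserts on client ids derived from it.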
+  subject(:kafka_input) { LogStash::Inputs::Kafka.new(config) }
+  let(:execution_context) { double("execution_context") }
+
+  before :each do
+    allow(kafka_input).to receive(:execution_context).and_return(execution_context)
+    allow(execution_context).to receive(:pipeline_id).and_return(pipeline_id)
+  end
+
   # Group ids to make sure that the consumers get all the logs.
   let(:group_id_1) {rand(36**8).to_s(36)}
   let(:group_id_2) {rand(36**8).to_s(36)}
   let(:group_id_3) {rand(36**8).to_s(36)}
   let(:group_id_4) {rand(36**8).to_s(36)}
-  let(:group_id_5) {rand(36**8).to_s(36)}
-  let(:plain_config) { { 'topics' => ['logstash_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_1, 'auto_offset_reset' => 'earliest'} }
-  let(:multi_consumer_config) { plain_config.merge({"group_id" => group_id_4, "client_id" => "spec", "consumer_threads" => 3}) }
-  let(:snappy_config) { { 'topics' => ['logstash_topic_snappy'], 'codec' => 'plain', 'group_id' => group_id_1, 'auto_offset_reset' => 'earliest'} }
-  let(:lz4_config) { { 'topics' => ['logstash_topic_lz4'], 'codec' => 'plain', 'group_id' => group_id_1, 'auto_offset_reset' => 'earliest'} }
-  let(:pattern_config) { { 'topics_pattern' => 'logstash_topic_.*', 'group_id' => group_id_2, 'codec' => 'plain', 'auto_offset_reset' => 'earliest'} }
-  let(:decorate_config) { { 'topics' => ['logstash_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_3, 'auto_offset_reset' => 'earliest', 'decorate_events' => true} }
-  let(:manual_commit_config) { { 'topics' => ['logstash_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_5, 'auto_offset_reset' => 'earliest', 'enable_auto_commit' => 'false'} }
+  let(:pipeline_id) {rand(36**8).to_s(36)}
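+  # Base settings shared by every context; each context merges in its own topic and group id.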
+  let(:config) { { 'codec' => 'plain', 'auto_offset_reset' => 'earliest' } }
   let(:timeout_seconds) { 30 }
   let(:num_events) { 103 }
+  let(:expected_num_events) { num_events }
 
25 |  | -  describe "#kafka-topics" do  | 
26 |  | -    def thread_it(kafka_input, queue)  | 
27 |  | -      Thread.new do  | 
28 |  | -        begin  | 
29 |  | -          kafka_input.run(queue)  | 
30 |  | -        end  | 
31 |  | -      end  | 
32 |  | -    end  | 
33 |  | - | 
34 |  | -    it "should consume all messages from plain 3-partition topic" do  | 
35 |  | -      kafka_input = LogStash::Inputs::Kafka.new(plain_config)  | 
36 |  | -      queue = Queue.new  | 
37 |  | -      t = thread_it(kafka_input, queue)  | 
38 |  | -      begin  | 
39 |  | -        t.run  | 
40 |  | -        wait(timeout_seconds).for {queue.length}.to eq(num_events)  | 
41 |  | -        expect(queue.length).to eq(num_events)  | 
42 |  | -      ensure  | 
43 |  | -        t.kill  | 
44 |  | -        t.join(30_000)  | 
45 |  | -      end  | 
46 |  | -    end  | 
47 |  | - | 
48 |  | -    it "should consume all messages from snappy 3-partition topic" do  | 
49 |  | -      kafka_input = LogStash::Inputs::Kafka.new(snappy_config)  | 
50 |  | -      queue = Queue.new  | 
51 |  | -      t = thread_it(kafka_input, queue)  | 
52 |  | -      begin  | 
53 |  | -        t.run  | 
54 |  | -        wait(timeout_seconds).for {queue.length}.to eq(num_events)  | 
55 |  | -        expect(queue.length).to eq(num_events)  | 
56 |  | -      ensure  | 
57 |  | -        t.kill  | 
58 |  | -        t.join(30_000)  | 
59 |  | -      end  | 
60 |  | -    end  | 
 | 56 | +  context 'from a plain 3 partition topic' do  | 
 | 57 | +    let(:config)  { super.merge({ 'topics' => ['logstash_topic_plain'], 'group_id' => group_id_1}) }  | 
 | 58 | +    it_behaves_like 'consumes all expected messages'  | 
 | 59 | +  end  | 
 
62 |  | -    it "should consume all messages from lz4 3-partition topic" do  | 
63 |  | -      kafka_input = LogStash::Inputs::Kafka.new(lz4_config)  | 
64 |  | -      queue = Queue.new  | 
65 |  | -      t = thread_it(kafka_input, queue)  | 
66 |  | -      begin  | 
67 |  | -        t.run  | 
68 |  | -        wait(timeout_seconds).for {queue.length}.to eq(num_events)  | 
69 |  | -        expect(queue.length).to eq(num_events)  | 
70 |  | -      ensure  | 
71 |  | -        t.kill  | 
72 |  | -        t.join(30_000)  | 
73 |  | -      end  | 
74 |  | -    end  | 
 | 61 | +  context 'from snappy 3 partition topic' do  | 
 | 62 | +    let(:config) { { 'topics' => ['logstash_topic_snappy'], 'codec' => 'plain', 'group_id' => group_id_1, 'auto_offset_reset' => 'earliest'} }  | 
 | 63 | +    it_behaves_like 'consumes all expected messages'  | 
 | 64 | +  end  | 
 
76 |  | -    it "should consumer all messages with multiple consumers" do  | 
77 |  | -      kafka_input = LogStash::Inputs::Kafka.new(multi_consumer_config)  | 
78 |  | -      queue = Queue.new  | 
79 |  | -      t = thread_it(kafka_input, queue)  | 
80 |  | -      begin  | 
81 |  | -        t.run  | 
82 |  | -        wait(timeout_seconds).for {queue.length}.to eq(num_events)  | 
83 |  | -        expect(queue.length).to eq(num_events)  | 
84 |  | -        kafka_input.kafka_consumers.each_with_index do |consumer, i|  | 
85 |  | -          expect(consumer.metrics.keys.first.tags["client-id"]).to eq("spec-#{i}-main")  | 
86 |  | -        end  | 
87 |  | -      ensure  | 
88 |  | -        t.kill  | 
89 |  | -        t.join(30_000)  | 
90 |  | -      end  | 
91 |  | -    end  | 
 | 66 | +  context 'from lz4 3 partition topic' do  | 
 | 67 | +    let(:config) { { 'topics' => ['logstash_topic_lz4'], 'codec' => 'plain', 'group_id' => group_id_1, 'auto_offset_reset' => 'earliest'} }  | 
 | 68 | +    it_behaves_like 'consumes all expected messages'  | 
92 | 69 |   end  | 
 
94 |  | -  describe "#kafka-topics-pattern" do  | 
95 |  | -    def thread_it(kafka_input, queue)  | 
96 |  | -      Thread.new do  | 
97 |  | -        begin  | 
98 |  | -          kafka_input.run(queue)  | 
99 |  | -        end  | 
100 |  | -      end  | 
101 |  | -    end  | 
 | 71 | +  context 'manually committing' do  | 
 | 72 | +    let(:config) { { 'topics' => ['logstash_topic_plain'], 'codec' => 'plain', 'group_id' => group_id_2, 'auto_offset_reset' => 'earliest', 'enable_auto_commit' => 'false'} }  | 
 | 73 | +    it_behaves_like 'consumes all expected messages'  | 
 | 74 | +  end  | 
 
103 |  | -    it "should consume all messages from all 3 topics" do  | 
104 |  | -      kafka_input = LogStash::Inputs::Kafka.new(pattern_config)  | 
105 |  | -      queue = Queue.new  | 
106 |  | -      t = thread_it(kafka_input, queue)  | 
107 |  | -      begin  | 
108 |  | -        t.run  | 
109 |  | -        wait(timeout_seconds).for {queue.length}.to eq(3*num_events)  | 
110 |  | -        expect(queue.length).to eq(3*num_events)  | 
111 |  | -      ensure  | 
112 |  | -        t.kill  | 
113 |  | -        t.join(30_000)  | 
114 |  | -      end  | 
115 |  | -    end  | 
 | 76 | +  context 'using a pattern to consume from all 3 topics' do  | 
 | 77 | +    let(:config) { { 'topics_pattern' => 'logstash_topic_.*', 'group_id' => group_id_3, 'codec' => 'plain', 'auto_offset_reset' => 'earliest'} }  | 
 | 78 | +    let(:expected_num_events) { 3*num_events }  | 
 | 79 | +    it_behaves_like 'consumes all expected messages'  | 
116 | 80 |   end  | 
 
118 |  | -  describe "#kafka-decorate" do  | 
119 |  | -    def thread_it(kafka_input, queue)  | 
120 |  | -      Thread.new do  | 
121 |  | -        begin  | 
122 |  | -          kafka_input.run(queue)  | 
 | 82 | +  context "with multiple consumers" do  | 
 | 83 | +    let(:config) { super.merge({'topics' => ['logstash_topic_plain'], "group_id" => group_id_4, "client_id" => "spec", "consumer_threads" => 3}) }  | 
 | 84 | +    it 'should should consume all messages' do  | 
 | 85 | +      run_with_kafka do |queue|  | 
 | 86 | +        expect(queue.length).to eq(num_events)  | 
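+        # Each consumer thread is expected to report a client-id of the form
+        # "<client_id>-<thread index>-<pipeline id>".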
+        kafka_input.kafka_consumers.each_with_index do |consumer, i|
+          expect(consumer.metrics.keys.first.tags["client-id"]).to eq("spec-#{i}-#{pipeline_id}")
         end
       end
     end
+  end
 
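+  # decorate_events exposes Kafka metadata (topic, consumer group, record timestamp)
+  # under the event's [@metadata][kafka] field, as asserted below.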
+  context 'with decorate_events set to true' do
+    let(:config) { super().merge({ 'topics' => ['logstash_topic_plain'], 'group_id' => group_id_3, 'decorate_events' => true }) }
127 | 96 |     it "should show the right topic and group name in decorated kafka section" do  | 
128 | 97 |       start = LogStash::Timestamp.now.time.to_i  | 
129 |  | -      kafka_input = LogStash::Inputs::Kafka.new(decorate_config)  | 
130 |  | -      queue = Queue.new  | 
131 |  | -      t = thread_it(kafka_input, queue)  | 
132 |  | -      begin  | 
133 |  | -        t.run  | 
134 |  | -        wait(timeout_seconds).for {queue.length}.to eq(num_events)  | 
 | 98 | +      run_with_kafka do  |queue|  | 
135 | 99 |         expect(queue.length).to eq(num_events)  | 
136 | 100 |         event = queue.shift  | 
137 | 101 |         expect(event.get("[@metadata][kafka][topic]")).to eq("logstash_topic_plain")  | 
138 | 102 |         expect(event.get("[@metadata][kafka][consumer_group]")).to eq(group_id_3)  | 
139 | 103 |         expect(event.get("[@metadata][kafka][timestamp]")).to be >= start  | 
-      ensure
-        t.kill
-        t.join(30_000)
-      end
-    end
-  end
-
-  describe "#kafka-offset-commit" do
-    def thread_it(kafka_input, queue)
-      Thread.new do
-        begin
-          kafka_input.run(queue)
-        end
-      end
-    end
-
-    it "should manually commit offsets" do
-      kafka_input = LogStash::Inputs::Kafka.new(manual_commit_config)
-      queue = Queue.new
-      t = thread_it(kafka_input, queue)
-      begin
-        t.run
-        wait(timeout_seconds).for {queue.length}.to eq(num_events)
-        expect(queue.length).to eq(num_events)
-      ensure
-        t.kill
-        t.join(30_000)
       end
     end
   end
 | 