Commit a30c1ac (parent: 139ea9f)

Refactored because of the deprecation of TopicPartitionCounter; moved the offset-tracking functionality into flush(). Fixes #43. (#44)

4 files changed: +95, -23 lines
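The refactor relies on the SinkTask lifecycle instead of manual bookkeeping: before committing consumer offsets, Kafka Connect calls flush() with the current offset for every assigned topic-partition, so put() no longer needs the deprecated TopicPartitionCounter. Below is a minimal sketch of the hook being overridden, against the public Kafka Connect API; the class name and body are illustrative, not the connector's actual implementation.

import java.util.Collection;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public abstract class OffsetAwareSinkTask extends SinkTask {
  @Override
  public void put(Collection<SinkRecord> records) {
    // Write the records themselves; no per-record offset counting required.
  }

  @Override
  public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
    // The framework supplies the offsets it is about to commit, one entry
    // per assigned topic-partition, which is exactly the data that
    // TopicPartitionCounter used to accumulate by hand inside put().
    currentOffsets.forEach((tp, om) ->
        System.out.printf("%s-%d -> %d%n", tp.topic(), tp.partition(), om.offset()));
  }
}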

pom.xml

Lines changed: 4 additions & 0 deletions

@@ -63,6 +63,10 @@
       <artifactId>lettuce-core</artifactId>
       <version>5.2.1.RELEASE</version>
     </dependency>
+    <dependency>
+      <groupId>com.github.jcustenborder.kafka.connect</groupId>
+      <artifactId>connect-utils-jackson</artifactId>
+    </dependency>
   </dependencies>
   <build>
     <plugins>
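The new connect-utils-jackson dependency brings in the ObjectMapperFactory singleton that the refactored task uses to serialize offset state (no <version> element appears here, so the version is presumably managed by the parent POM). A hedged sketch of the JSON this enables, using a plain Jackson ObjectMapper as a stand-in for ObjectMapperFactory.INSTANCE and a map as a stand-in for the SinkOffsetState value added in this commit:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class OffsetJsonSketch {
  public static void main(String[] args) throws Exception {
    // Stand-in for ObjectMapperFactory.INSTANCE from connect-utils-jackson.
    ObjectMapper mapper = new ObjectMapper();

    // Shape of the state RedisSinkTask persists per topic-partition.
    Map<String, Object> state = new LinkedHashMap<>();
    state.put("topic", "topic");
    state.put("partition", 1);
    state.put("offset", 42L);

    byte[] value = mapper.writeValueAsBytes(state);
    // Prints: {"topic":"topic","partition":1,"offset":42}
    System.out.println(new String(value, StandardCharsets.UTF_8));
  }
}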

src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkTask.java

Lines changed: 35 additions & 22 deletions

@@ -17,12 +17,11 @@
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.github.jcustenborder.kafka.connect.utils.VersionUtil;
-import com.github.jcustenborder.kafka.connect.utils.data.SinkOffsetState;
-import com.github.jcustenborder.kafka.connect.utils.data.TopicPartitionCounter;
 import com.github.jcustenborder.kafka.connect.utils.jackson.ObjectMapperFactory;
 import com.google.common.base.Charsets;
 import io.lettuce.core.KeyValue;
 import io.lettuce.core.RedisFuture;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.connect.errors.DataException;
 import org.apache.kafka.connect.errors.RetriableException;

@@ -146,8 +145,6 @@ public void put(Collection<SinkRecord> records) {
 
     SinkOperation operation = SinkOperation.NONE;
 
-    TopicPartitionCounter counter = new TopicPartitionCounter();
-
     for (SinkRecord record : records) {
       log.trace("put() - Processing record " + formatLocation(record));
       if (null == record.key()) {

@@ -182,7 +179,6 @@ public void put(Collection<SinkRecord> records) {
         operations.add(operation);
       }
       operation.add(key, value);
-      counter.increment(record.topic(), record.kafkaPartition(), record.kafkaOffset());
     }
 
     log.debug(

@@ -191,33 +187,50 @@ public void put(Collection<SinkRecord> records) {
         records.size()
     );
 
-    final List<SinkOffsetState> offsetData = counter.offsetStates();
-    if (!offsetData.isEmpty()) {
-      operation = SinkOperation.create(SinkOperation.Type.SET, this.config, offsetData.size());
-      operations.add(operation);
-      for (SinkOffsetState e : offsetData) {
-        final byte[] key = String.format("__kafka.offset.%s.%s", e.topic(), e.partition()).getBytes(Charsets.UTF_8);
-        final byte[] value;
-        try {
-          value = ObjectMapperFactory.INSTANCE.writeValueAsBytes(e);
-        } catch (JsonProcessingException e1) {
-          throw new DataException(e1);
-        }
-        operation.add(key, value);
-        log.trace("put() - Setting offset: {}", e);
-      }
-    }
-
     for (SinkOperation op : operations) {
       log.debug("put() - Executing {} operation with {} values", op.type, op.size());
       try {
         op.execute(this.session.asyncCommands());
       } catch (InterruptedException e) {
+        log.warn("Exception thrown while executing operation", e);
         throw new RetriableException(e);
       }
     }
   }
 
+  @Override
+  public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
+    SinkOperation operation = SinkOperation.create(SinkOperation.Type.SET, this.config, currentOffsets.size());
+
+    List<SinkOffsetState> states = currentOffsets
+        .entrySet().stream()
+        .map(e -> ImmutableSinkOffsetState.builder()
+            .topic(e.getKey().topic())
+            .partition(e.getKey().partition())
+            .offset(e.getValue().offset())
+            .build()
+        ).collect(Collectors.toList());
+
+    for (SinkOffsetState e : states) {
+      final byte[] key = String.format("__kafka.offset.%s.%s", e.topic(), e.partition()).getBytes(Charsets.UTF_8);
+      final byte[] value;
+      try {
+        value = ObjectMapperFactory.INSTANCE.writeValueAsBytes(e);
+      } catch (JsonProcessingException e1) {
+        throw new DataException(e1);
+      }
+      operation.add(key, value);
+      log.trace("flush() - Setting offset: {}", e);
+    }
+
+    try {
+      operation.execute(this.session.asyncCommands());
+    } catch (InterruptedException e) {
+      log.warn("Exception thrown while executing operation", e);
+      throw new RetriableException(e);
+    }
+  }
+
   private static String redisOffsetKey(TopicPartition topicPartition) {
     return String.format("__kafka.offset.%s.%s", topicPartition.topic(), topicPartition.partition());
   }
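After this change, every committed offset is visible in Redis under the key produced by redisOffsetKey(), i.e. __kafka.offset.<topic>.<partition>, with a JSON body. A hedged sketch of inspecting one with Lettuce (assumes a Redis instance at localhost:6379 and the default UTF-8 string codec; the connector itself reads and writes raw bytes):

import io.lettuce.core.RedisClient;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.sync.RedisCommands;

public class ReadOffsetSketch {
  public static void main(String[] args) {
    RedisClient client = RedisClient.create("redis://localhost:6379");
    try (StatefulRedisConnection<String, String> connection = client.connect()) {
      RedisCommands<String, String> commands = connection.sync();
      // Key format written by RedisSinkTask.flush(): __kafka.offset.<topic>.<partition>
      String json = commands.get("__kafka.offset.topic.1");
      // e.g. {"topic":"topic","partition":1,"offset":42}
      System.out.println(json);
    } finally {
      client.shutdown();
    }
  }
}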
src/main/java/com/github/jcustenborder/kafka/connect/redis/SinkOffsetState.java

Lines changed: 49 additions & 0 deletions

@@ -0,0 +1,49 @@
+/**
+ * Copyright © 2017 Jeremy Custenborder ([email protected])
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.github.jcustenborder.kafka.connect.redis;
+
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+import org.apache.kafka.common.TopicPartition;
+import org.immutables.value.Value;
+
+@Value.Immutable
+@JsonDeserialize(as = ImmutableSinkOffsetState.class)
+@JsonAutoDetect(
+    fieldVisibility = Visibility.NONE,
+    getterVisibility = Visibility.NONE,
+    setterVisibility = Visibility.NONE,
+    isGetterVisibility = Visibility.NONE,
+    creatorVisibility = Visibility.NONE)
+public interface SinkOffsetState {
+  @JsonProperty("topic")
+  String topic();
+
+  @JsonProperty("partition")
+  Integer partition();
+
+  @JsonProperty("offset")
+  Long offset();
+
+  @JsonIgnore
+  @Value.Derived
+  default TopicPartition topicPartition() {
+    return new TopicPartition(topic(), partition());
+  }
+}
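SinkOffsetState used to come from connect-utils; it is now defined locally as an Immutables value interface. @Value.Immutable generates the ImmutableSinkOffsetState class whose builder flush() uses above, and @JsonDeserialize(as = ImmutableSinkOffsetState.class) lets Jackson materialize the interface from the stored JSON. A small usage sketch, assuming the Immutables annotation processor has run:

import com.github.jcustenborder.kafka.connect.redis.ImmutableSinkOffsetState;
import com.github.jcustenborder.kafka.connect.redis.SinkOffsetState;

public class SinkOffsetStateSketch {
  public static void main(String[] args) {
    SinkOffsetState state = ImmutableSinkOffsetState.builder()
        .topic("topic")
        .partition(1)
        .offset(42L)
        .build();

    // The derived accessor rebuilds the Kafka coordinate on demand instead
    // of storing it in the JSON payload (it is marked @JsonIgnore).
    System.out.println(state.topicPartition()); // prints: topic-1
  }
}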

src/test/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkTaskTest.java

Lines changed: 7 additions & 1 deletion

@@ -21,6 +21,8 @@
 import io.lettuce.core.RedisFuture;
 import io.lettuce.core.cluster.api.async.RedisAdvancedClusterAsyncCommands;
 import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.data.SchemaAndValue;
 import org.apache.kafka.connect.errors.DataException;

@@ -48,6 +50,8 @@
 public class RedisSinkTaskTest {
   long offset = 1;
 
+  SinkRecord lastRecord;
+
   SinkRecord record(String k, String v) {
     final byte[] key = k.getBytes(Charsets.UTF_8);
     final Schema keySchema = Schema.BYTES_SCHEMA;

@@ -62,7 +66,7 @@ SinkRecord record(String k, String v) {
       valueSchema = Schema.BYTES_SCHEMA;
     }
 
-    return new SinkRecord(
+    return lastRecord = new SinkRecord(
         "topic",
         1,
         keySchema,

@@ -147,6 +151,8 @@ public void put() throws InterruptedException {
     InOrder inOrder = Mockito.inOrder(asyncCommands);
     inOrder.verify(asyncCommands).mset(anyMap());
     inOrder.verify(asyncCommands).del(any(byte[].class));
+
+    task.flush(ImmutableMap.of(new TopicPartition(lastRecord.topic(), lastRecord.kafkaPartition()), new OffsetAndMetadata(lastRecord.kafkaOffset())));
     inOrder.verify(asyncCommands, times(2)).mset(anyMap());
   }
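For context on the times(2) verification: the first mset comes from put() writing the record batch, and the flush() call added above issues the offset SET operation that produces the second mset.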