Merge branch 'awslabs:master' into KCL2XReshardTests
commit a207c3760c
88 changed files with 634 additions and 475 deletions
CHANGELOG.md (11 additions)

@@ -3,6 +3,17 @@
 For **1.x** release notes, please see [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md)

 ---
+### Release 2.5.1 (June 27, 2023)
+* [#1143](https://github.com/awslabs/amazon-kinesis-client/pull/1143) Upgrade MultiLangDaemon to support StreamARN
+* [#1145](https://github.com/awslabs/amazon-kinesis-client/pull/1145) Introduced GitHub actions to trigger Maven builds during merge/pull requests
+* [#1136](https://github.com/awslabs/amazon-kinesis-client/pull/1136) Added testing architecture and KCL 2.x basic polling/streaming tests
+* [#1153](https://github.com/awslabs/amazon-kinesis-client/pull/1153) Checkstyle: added `UnusedImports` check.
+* [#1150](https://github.com/awslabs/amazon-kinesis-client/pull/1150) Enabled Checkstyle validation of test resources.
+* [#1149](https://github.com/awslabs/amazon-kinesis-client/pull/1149) Bound Checkstyle to `validate` goal for automated enforcement.
+* [#1148](https://github.com/awslabs/amazon-kinesis-client/pull/1148) Code cleanup to facilitate Checkstyle enforcement.
+* [#1142](https://github.com/awslabs/amazon-kinesis-client/pull/1142) Upgrade Google Guava dependency version from 31.1-jre to 32.0.0-jre
+* [#1115](https://github.com/awslabs/amazon-kinesis-client/pull/1115) Update KCL version from 2.5.0 to 2.5.1-SNAPSHOT
+
 ### Release 2.5.0 (May 19, 2023)
 * **[#1109](https://github.com/awslabs/amazon-kinesis-client/pull/1109) Add support for stream ARNs**
 * **[#1065](https://github.com/awslabs/amazon-kinesis-client/pull/1065) Allow tags to be added when lease table is created**
@@ -58,7 +58,7 @@ The recommended way to use the KCL for Java is to consume it from Maven.
   <dependency>
       <groupId>software.amazon.kinesis</groupId>
       <artifactId>amazon-kinesis-client</artifactId>
-      <version>2.4.8</version>
+      <version>2.5.1</version>
   </dependency>
 ```

@@ -21,7 +21,7 @@
     <parent>
         <artifactId>amazon-kinesis-client-pom</artifactId>
         <groupId>software.amazon.kinesis</groupId>
-        <version>2.5.1-SNAPSHOT</version>
+        <version>2.5.1</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>

@@ -19,7 +19,6 @@ import java.util.concurrent.ExecutorService;
 import com.fasterxml.jackson.databind.ObjectMapper;

 import lombok.extern.slf4j.Slf4j;
-import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;
 import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;
 import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
 import software.amazon.kinesis.processor.ShardRecordProcessor;

@@ -22,8 +22,6 @@ import java.util.List;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.AWSCredentialsProviderChain;
 import lombok.extern.slf4j.Slf4j;
-import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
-import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;

 /**
  * Get AWSCredentialsProvider property.

@@ -20,7 +20,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 import java.util.function.Function;
-import java.util.function.Supplier;

 import lombok.Getter;
 import org.apache.commons.beanutils.ConvertUtilsBean;

@@ -150,7 +149,6 @@ public class BuilderDynaBean implements DynaBean {
         } else {
             return expected.cast(dynaBeanCreateSupport.build());
         }
-
     }

     private void validateResolvedEmptyHandler() {

@@ -23,8 +23,9 @@ import org.apache.commons.beanutils.BeanUtilsBean;
 import org.apache.commons.beanutils.ConvertUtilsBean;

 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.arns.Arn;
+import software.amazon.kinesis.common.StreamIdentifier;

 /**
  * KinesisClientLibConfigurator constructs a KinesisClientLibConfiguration from java properties file. The following

@@ -40,7 +41,6 @@ public class KinesisClientLibConfigurator {
     private final BeanUtilsBean utilsBean;
     private final MultiLangDaemonConfiguration configuration;

-
     /**
      * Constructor.
      */

@@ -69,8 +69,18 @@ public class KinesisClientLibConfigurator {
         });

         Validate.notBlank(configuration.getApplicationName(), "Application name is required");
-        Validate.notBlank(configuration.getStreamName(), "Stream name is required");
+
+        if (configuration.getStreamArn() != null && !configuration.getStreamArn().trim().isEmpty()) {
+            final Arn streamArnObj = Arn.fromString(configuration.getStreamArn());
+            StreamIdentifier.validateArn(streamArnObj);
+            //Parse out the stream Name from the Arn (and/or override existing value for Stream Name)
+            final String streamNameFromArn = streamArnObj.resource().resource();
+            configuration.setStreamName(streamNameFromArn);
+        }
+
+        Validate.notBlank(configuration.getStreamName(), "Stream name or Stream Arn is required. Stream Arn takes precedence if both are passed in.");
         Validate.isTrue(configuration.getKinesisCredentialsProvider().isDirty(), "A basic set of AWS credentials must be provided");

         return configuration;
     }

@@ -96,5 +106,4 @@ public class KinesisClientLibConfigurator {
         return getConfiguration(properties);
     }

-
 }

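For readers unfamiliar with the ARN handling introduced above: the configurator now derives the stream name from the ARN's resource component. Below is a minimal, self-contained sketch of that extraction using the same `software.amazon.awssdk.arns.Arn` helper the change imports; the ARN value, account id, and class name are made-up illustration values, not part of this commit.

```java
import software.amazon.awssdk.arns.Arn;

public class StreamArnParsingSketch {
    public static void main(String[] args) {
        // Hypothetical stream ARN used purely for illustration.
        final Arn streamArn = Arn.fromString("arn:aws:kinesis:us-east-1:123456789012:stream/my-example-stream");

        // resource() yields the "stream/my-example-stream" portion; resource().resource()
        // is the name after the resource type, mirroring streamArnObj.resource().resource() above.
        final String streamName = streamArn.resource().resource();

        System.out.println(streamName); // prints: my-example-stream
    }
}
```

As the new validation message notes, when both a stream name and a stream ARN are supplied, the ARN-derived name wins.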
@@ -28,7 +28,6 @@ import java.util.UUID;
 import java.util.function.Function;

 import org.apache.commons.beanutils.BeanUtilsBean;
-import org.apache.commons.beanutils.ConvertUtils;
 import org.apache.commons.beanutils.ConvertUtilsBean;
 import org.apache.commons.beanutils.Converter;
 import org.apache.commons.beanutils.converters.ArrayConverter;

@@ -73,6 +72,8 @@ public class MultiLangDaemonConfiguration {
     private String applicationName;

     private String streamName;
+
+    private String streamArn;

     @ConfigurationSettable(configurationClass = ConfigsBuilder.class)
     private String tableName;

@@ -157,7 +158,6 @@ public class MultiLangDaemonConfiguration {
         metricsEnabledDimensions = new HashSet<>(Arrays.asList(dimensions));
     }

-
     private RetrievalMode retrievalMode = RetrievalMode.DEFAULT;

     private final FanoutConfigBean fanoutConfig = new FanoutConfigBean();

@@ -169,7 +169,6 @@ public class MultiLangDaemonConfiguration {
     private long shutdownGraceMillis;
     private Integer timeoutInSeconds;

-
     private final BuilderDynaBean kinesisCredentialsProvider;

     public void setAWSCredentialsProvider(String providerString) {

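Tying the two changes above together, a MultiLang properties set that supplies `streamArn` instead of `streamName` now resolves to a usable configuration. The sketch below is illustrative only: the property keys are the ones exercised by the tests in this commit, the ARN and application name are placeholders, and it assumes the `getConfiguration(Properties)` overload referenced in the configurator hunk, in the `software.amazon.kinesis.multilang.config` package.

```java
import java.util.Properties;

import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

public class StreamArnConfigSketch {
    public static void main(String[] args) {
        // Placeholder values; only the property keys come from this change.
        final Properties props = new Properties();
        props.setProperty("applicationName", "sample-app");
        props.setProperty("AWSCredentialsProvider", "DefaultAWSCredentialsProviderChain");
        props.setProperty("streamArn", "arn:aws:kinesis:us-east-1:123456789012:stream/sample-stream");

        final MultiLangDaemonConfiguration config =
                new KinesisClientLibConfigurator().getConfiguration(props);

        // Per the new validation, the stream name is back-filled from the ARN.
        System.out.println(config.getStreamName()); // sample-stream
        System.out.println(config.getStreamArn());
    }
}
```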
@@ -28,16 +28,15 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;

-import software.amazon.kinesis.multilang.MessageReader;
 import software.amazon.kinesis.multilang.messages.Message;
 import software.amazon.kinesis.multilang.messages.StatusMessage;
 import com.fasterxml.jackson.databind.ObjectMapper;

 public class MessageReaderTest {

-    private static final String shardId = "shard-123";
+    private static final String SHARD_ID = "shard-123";

-    /*
+    /**
      * This line is based on the definition of the protocol for communication between the KCL record processor and
      * the client's process.
      */

@@ -45,7 +44,7 @@ public class MessageReaderTest {
         return String.format("{\"action\":\"checkpoint\", \"checkpoint\":\"%s\"}", sequenceNumber);
     }

-    /*
+    /**
      * This line is based on the definition of the protocol for communication between the KCL record processor and
      * the client's process.
      */

@@ -80,10 +79,9 @@ public class MessageReaderTest {
         String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" };
         InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors);
         MessageReader reader =
-                new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+                new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());

         for (String responseFor : responseFors) {
-            StatusMessage statusMessage = null;
             try {
                 Message message = reader.getNextMessageFromSTDOUT().get();
                 if (message instanceof StatusMessage) {

@@ -103,14 +101,14 @@ public class MessageReaderTest {
         InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors);

         MessageReader reader =
-                new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+                new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
         Future<Boolean> drainFuture = reader.drainSTDOUT();
         Boolean drainResult = drainFuture.get();
         Assert.assertNotNull(drainResult);
         Assert.assertTrue(drainResult);
     }

-    /*
+    /**
      * readValue should fail safely and just continue looping
      */
     @Test

@@ -135,7 +133,7 @@ public class MessageReaderTest {
         }

         MessageReader reader =
-                new MessageReader().initialize(bufferReader, shardId, new ObjectMapper(),
+                new MessageReader().initialize(bufferReader, SHARD_ID, new ObjectMapper(),
                         Executors.newCachedThreadPool());

         try {

@@ -150,7 +148,7 @@ public class MessageReaderTest {
     public void messageReaderBuilderTest() {
         InputStream stream = new ByteArrayInputStream("".getBytes());
         MessageReader reader =
-                new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+                new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
         Assert.assertNotNull(reader);
     }

@@ -159,7 +157,7 @@ public class MessageReaderTest {
         BufferedReader input = Mockito.mock(BufferedReader.class);
         Mockito.doThrow(IOException.class).when(input).readLine();
         MessageReader reader =
-                new MessageReader().initialize(input, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+                new MessageReader().initialize(input, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());

         Future<Message> readTask = reader.getNextMessageFromSTDOUT();

@@ -177,7 +175,7 @@ public class MessageReaderTest {
     public void noMoreMessagesTest() throws InterruptedException {
         InputStream stream = new ByteArrayInputStream("".getBytes());
         MessageReader reader =
-                new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+                new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
         Future<Message> future = reader.getNextMessageFromSTDOUT();

         try {

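The MessageReader tests above drive the MultiLangDaemon line protocol, in which the child process replies with one JSON object per line (for example the checkpoint status line built by `buildInputStreamOfGoodInput`). As a standalone illustration of that wire format only, and not the actual `MessageReader` implementation, one such line can be decoded with Jackson like this:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class StatusLineSketch {
    public static void main(String[] args) throws Exception {
        // Same shape as the checkpoint line produced in the test above; the sequence number is arbitrary.
        final String line = "{\"action\":\"checkpoint\", \"checkpoint\":\"456\"}";

        final JsonNode message = new ObjectMapper().readTree(line);

        // MessageReader dispatches on the "action" field of each decoded line.
        System.out.println(message.get("action").asText());     // checkpoint
        System.out.println(message.get("checkpoint").asText()); // 456
    }
}
```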
@@ -32,35 +32,30 @@ import org.mockito.Mockito;

 import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
 import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
-import software.amazon.kinesis.multilang.MessageWriter;
-import software.amazon.kinesis.multilang.messages.LeaseLostMessage;
 import software.amazon.kinesis.multilang.messages.Message;
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;

 import software.amazon.kinesis.lifecycle.events.InitializationInput;
 import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
-import software.amazon.kinesis.lifecycle.ShutdownReason;
 import software.amazon.kinesis.retrieval.KinesisClientRecord;

 import static org.mockito.Mockito.verify;

 public class MessageWriterTest {

-    private static final String shardId = "shard-123";
+    private static final String SHARD_ID = "shard-123";
     MessageWriter messageWriter;
     OutputStream stream;

     @Rule
     public final ExpectedException thrown = ExpectedException.none();

-    // ExecutorService executor;
-
     @Before
     public void setup() {
         stream = Mockito.mock(OutputStream.class);
         messageWriter =
-                new MessageWriter().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+                new MessageWriter().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
     }

     /*

@@ -86,7 +81,7 @@ public class MessageWriterTest {

     @Test
     public void writeInitializeMessageTest() throws IOException, InterruptedException, ExecutionException {
-        Future<Boolean> future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build());
+        Future<Boolean> future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build());
         future.get();
         verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
                 Mockito.anyInt());

@@ -131,20 +126,20 @@ public class MessageWriterTest {
     @Test
     public void streamIOExceptionTest() throws IOException, InterruptedException, ExecutionException {
         Mockito.doThrow(IOException.class).when(stream).flush();
-        Future<Boolean> initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build());
+        Future<Boolean> initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build());
         Boolean result = initializeTask.get();
         Assert.assertNotNull(result);
         Assert.assertFalse(result);
     }

     @Test
-    public void objectMapperFails() throws JsonProcessingException, InterruptedException, ExecutionException {
+    public void objectMapperFails() throws JsonProcessingException {
         thrown.expect(RuntimeException.class);
         thrown.expectMessage("Encountered I/O error while writing LeaseLostMessage action to subprocess");

         ObjectMapper mapper = Mockito.mock(ObjectMapper.class);
         Mockito.doThrow(JsonProcessingException.class).when(mapper).writeValueAsString(Mockito.any(Message.class));
-        messageWriter = new MessageWriter().initialize(stream, shardId, mapper, Executors.newCachedThreadPool());
+        messageWriter = new MessageWriter().initialize(stream, SHARD_ID, mapper, Executors.newCachedThreadPool());

         messageWriter.writeLeaseLossMessage(LeaseLostInput.builder().build());
     }

@@ -157,7 +152,7 @@ public class MessageWriterTest {
         Assert.assertFalse(this.messageWriter.isOpen());
         try {
             // Any message should fail
-            this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build());
+            this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build());
             Assert.fail("MessageWriter should be closed and unable to write.");
         } catch (IllegalStateException e) {
             // This should happen.

@@ -14,17 +14,14 @@
  */
 package software.amazon.kinesis.multilang;

+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.when;

 import java.io.ByteArrayInputStream;
 import java.io.IOException;
-import java.util.Properties;

-import org.apache.commons.beanutils.BeanUtilsBean;
-import org.apache.commons.beanutils.ConvertUtilsBean;
-import org.junit.Before;
+import software.amazon.awssdk.regions.Region;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;

@@ -39,58 +36,163 @@ import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

 @RunWith(MockitoJUnitRunner.class)
 public class MultiLangDaemonConfigTest {
-    private static String FILENAME = "some.properties";
+    private static final String FILENAME = "some.properties";
+    private static final String EXE = "TestExe.exe";
+    private static final String APPLICATION_NAME = MultiLangDaemonConfigTest.class.getSimpleName();
+    private static final String STREAM_NAME = "fakeStream";
+    private static final String STREAM_NAME_IN_ARN = "FAKE_STREAM_NAME";
+    private static final Region REGION = Region.US_EAST_1;
+    private static final String STREAM_ARN = "arn:aws:kinesis:us-east-2:012345678987:stream/" + STREAM_NAME_IN_ARN;
+
+    @Mock
+    private ClassLoader classLoader;

     @Mock
     private AwsCredentialsProvider credentialsProvider;
     @Mock
     private AwsCredentials creds;
-    @Mock
     private KinesisClientLibConfigurator configurator;
+    private MultiLangDaemonConfig deamonConfig;

-    @Before
-    public void setup() {
-        ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
-        BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean);
-        MultiLangDaemonConfiguration multiLangDaemonConfiguration = new MultiLangDaemonConfiguration(utilsBean,
-                convertUtilsBean);
-        multiLangDaemonConfiguration.setApplicationName("cool-app");
-        multiLangDaemonConfiguration.setStreamName("cool-stream");
-        multiLangDaemonConfiguration.setWorkerIdentifier("cool-worker");
-        when(credentialsProvider.resolveCredentials()).thenReturn(creds);
-        when(creds.accessKeyId()).thenReturn("cool-user");
-        when(configurator.getConfiguration(any(Properties.class))).thenReturn(multiLangDaemonConfiguration);
-    }
-
-    @Test
-    public void constructorTest() throws IOException {
-        String PROPERTIES = "executableName = randomEXE \n" + "applicationName = testApp \n"
-                + "streamName = fakeStream \n" + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n"
-                + "processingLanguage = malbolge";
-        ClassLoader classLoader = Mockito.mock(ClassLoader.class);
-
-        Mockito.doReturn(new ByteArrayInputStream(PROPERTIES.getBytes())).when(classLoader)
+    /**
+     * Instantiate a MultiLangDaemonConfig object
+     * @param streamName
+     * @param streamArn
+     * @throws IOException
+     */
+    public void setup(String streamName, String streamArn) throws IOException {
+        String properties = String.format("executableName = %s\n"
+                + "applicationName = %s\n"
+                + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n"
+                + "processingLanguage = malbolge\n"
+                + "regionName = %s\n",
+                EXE,
+                APPLICATION_NAME,
+                "us-east-1");
+
+        if (streamName != null) {
+            properties += String.format("streamName = %s\n", streamName);
+        }
+        if (streamArn != null) {
+            properties += String.format("streamArn = %s\n", streamArn);
+        }
+        classLoader = Mockito.mock(ClassLoader.class);
+
+        Mockito.doReturn(new ByteArrayInputStream(properties.getBytes())).when(classLoader)
                 .getResourceAsStream(FILENAME);

-        MultiLangDaemonConfig deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
-
-        assertNotNull(deamonConfig.getExecutorService());
-        assertNotNull(deamonConfig.getMultiLangDaemonConfiguration());
-        assertNotNull(deamonConfig.getRecordProcessorFactory());
+        when(credentialsProvider.resolveCredentials()).thenReturn(creds);
+        when(creds.accessKeyId()).thenReturn("cool-user");
+        configurator = new KinesisClientLibConfigurator();
+
+        deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testConstructorFailsBecauseStreamArnIsInvalid() throws Exception {
+        setup("", "this_is_not_a_valid_arn");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testConstructorFailsBecauseStreamArnIsInvalid2() throws Exception {
+        setup("", "arn:aws:kinesis:us-east-2:ACCOUNT_ID:BadFormatting:stream/" + STREAM_NAME_IN_ARN);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testConstructorFailsBecauseStreamNameAndArnAreEmpty() throws Exception {
+        setup("", "");
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testConstructorFailsBecauseStreamNameAndArnAreNull() throws Exception {
+        setup(null, null);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testConstructorFailsBecauseStreamNameIsNullAndArnIsEmpty() throws Exception {
+        setup(null, "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testConstructorFailsBecauseStreamNameIsEmptyAndArnIsNull() throws Exception {
+        setup("", null);
     }

     @Test
-    public void propertyValidation() {
-        String PROPERTIES_NO_EXECUTABLE_NAME = "applicationName = testApp \n" + "streamName = fakeStream \n"
+    public void testConstructorUsingStreamName() throws IOException {
+        setup(STREAM_NAME, null);
+
+        assertConfigurationsMatch(STREAM_NAME, null);
+    }
+
+    @Test
+    public void testConstructorUsingStreamNameAndStreamArnIsEmpty() throws IOException {
+        setup(STREAM_NAME, "");
+
+        assertConfigurationsMatch(STREAM_NAME, "");
+    }
+
+    @Test
+    public void testConstructorUsingStreamNameAndStreamArnIsWhitespace() throws IOException {
+        setup(STREAM_NAME, " ");
+
+        assertConfigurationsMatch(STREAM_NAME, "");
+    }
+
+    @Test
+    public void testConstructorUsingStreamArn() throws IOException {
+        setup(null, STREAM_ARN);
+
+        assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
+    }
+
+    @Test
+    public void testConstructorUsingStreamNameAsEmptyAndStreamArn() throws IOException {
+        setup("", STREAM_ARN);
+
+        assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
+    }
+
+    @Test
+    public void testConstructorUsingStreamArnOverStreamName() throws IOException {
+        setup(STREAM_NAME, STREAM_ARN);
+
+        assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
+    }
+
+    /**
+     * Verify the daemonConfig properties are what we expect them to be.
+     *
+     * @param expectedStreamName
+     */
+    private void assertConfigurationsMatch(String expectedStreamName, String expectedStreamArn) {
+        final MultiLangDaemonConfiguration multiLangConfiguration = deamonConfig.getMultiLangDaemonConfiguration();
+        assertNotNull(deamonConfig.getExecutorService());
+        assertNotNull(multiLangConfiguration);
+        assertNotNull(deamonConfig.getRecordProcessorFactory());
+
+        assertEquals(EXE, deamonConfig.getRecordProcessorFactory().getCommandArray()[0]);
+        assertEquals(APPLICATION_NAME, multiLangConfiguration.getApplicationName());
+        assertEquals(expectedStreamName, multiLangConfiguration.getStreamName());
+        assertEquals(REGION, multiLangConfiguration.getDynamoDbClient().get("region"));
+        assertEquals(REGION, multiLangConfiguration.getCloudWatchClient().get("region"));
+        assertEquals(REGION, multiLangConfiguration.getKinesisClient().get("region"));
+        assertEquals(expectedStreamArn, multiLangConfiguration.getStreamArn());
+    }
+
+    @Test
+    public void testPropertyValidation() {
+        String propertiesNoExecutableName = "applicationName = testApp \n" + "streamName = fakeStream \n"
                 + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + "processingLanguage = malbolge";
         ClassLoader classLoader = Mockito.mock(ClassLoader.class);

-        Mockito.doReturn(new ByteArrayInputStream(PROPERTIES_NO_EXECUTABLE_NAME.getBytes())).when(classLoader)
+        Mockito.doReturn(new ByteArrayInputStream(propertiesNoExecutableName.getBytes())).when(classLoader)
                 .getResourceAsStream(FILENAME);

-        MultiLangDaemonConfig config;
         try {
-            config = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
+            new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
             Assert.fail("Construction of the config should have failed due to property validation failing.");
         } catch (IllegalArgumentException e) {
             // Good

@@ -17,7 +17,6 @@ package software.amazon.kinesis.multilang;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.isEmptyOrNullString;
 import static org.hamcrest.Matchers.nullValue;
 import static org.junit.Assert.assertThat;
 import static org.mockito.Matchers.anyObject;

@@ -31,7 +31,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;

@@ -61,10 +60,8 @@ import software.amazon.kinesis.multilang.messages.ShardEndedMessage;
 import software.amazon.kinesis.multilang.messages.StatusMessage;
 import com.google.common.util.concurrent.SettableFuture;

-import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;
 import software.amazon.kinesis.lifecycle.events.InitializationInput;
 import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
-import software.amazon.kinesis.lifecycle.ShutdownReason;
 import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
 import software.amazon.kinesis.retrieval.KinesisClientRecord;

@@ -106,7 +103,7 @@ public class MultiLangProtocolTest {
     }

     @Test
-    public void initializeTest() throws InterruptedException, ExecutionException {
+    public void testInitialize() {
         when(messageWriter
                 .writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder()
                         .shardId(shardId).build())))).thenReturn(buildFuture(true));

@@ -116,7 +113,7 @@ public class MultiLangProtocolTest {
     }

     @Test
-    public void processRecordsTest() throws InterruptedException, ExecutionException {
+    public void testProcessRecords() {
         when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true));
         when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(
                 new StatusMessage("processRecords"), Message.class));

@@ -131,7 +128,6 @@ public class MultiLangProtocolTest {
         when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage(LeaseLostMessage.ACTION), Message.class));

         assertThat(protocol.leaseLost(LeaseLostInput.builder().build()), equalTo(true));
-
     }

     @Test

@@ -177,7 +173,7 @@ public class MultiLangProtocolTest {
     }

     @Test
-    public void processRecordsWithCheckpointsTest() throws InterruptedException, ExecutionException,
+    public void testProcessRecordsWithCheckpoints() throws
             KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {

         when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true));

@@ -206,7 +202,7 @@ public class MultiLangProtocolTest {
     }

     @Test
-    public void processRecordsWithABadCheckpointTest() throws InterruptedException, ExecutionException {
+    public void testProcessRecordsWithABadCheckpoint() {
         when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true));
         when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(false));
         when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList<Message>() {

@@ -27,12 +27,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
-import software.amazon.kinesis.multilang.DrainChildSTDERRTask;
-import software.amazon.kinesis.multilang.LineReaderTask;

 public class ReadSTDERRTaskTest {

-    private static final String shardId = "shard-123";
+    private static final String SHARD_ID = "shard-123";
     private BufferedReader mockBufferReader;

     @Before

@@ -45,7 +43,7 @@ public class ReadSTDERRTaskTest {

         String errorMessages = "OMG\nThis is test message\n blah blah blah \n";
         InputStream stream = new ByteArrayInputStream(errorMessages.getBytes());
-        LineReaderTask<Boolean> reader = new DrainChildSTDERRTask().initialize(stream, shardId, "");
+        LineReaderTask<Boolean> reader = new DrainChildSTDERRTask().initialize(stream, SHARD_ID, "");
         Assert.assertNotNull(reader);
     }

@@ -54,7 +52,7 @@ public class ReadSTDERRTaskTest {
         String errorMessages = "OMG\nThis is test message\n blah blah blah \n";
         BufferedReader bufferReader =
                 new BufferedReader(new InputStreamReader(new ByteArrayInputStream(errorMessages.getBytes())));
-        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(bufferReader, shardId, "");
+        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(bufferReader, SHARD_ID, "");
         Assert.assertNotNull(errorReader);

         Boolean result = errorReader.call();

@@ -67,7 +65,7 @@ public class ReadSTDERRTaskTest {
         } catch (IOException e) {
             Assert.fail("Not supposed to get an exception when we're just building our mock.");
         }
-        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, shardId, "");
+        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, SHARD_ID, "");
         Assert.assertNotNull(errorReader);
         Future<Boolean> result = Executors.newCachedThreadPool().submit(errorReader);
         Boolean finishedCleanly = null;

@@ -14,12 +14,9 @@
  */
 package software.amazon.kinesis.multilang;

-import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;
 import org.junit.Assert;
 import org.junit.Test;

-import software.amazon.kinesis.multilang.MultiLangRecordProcessorFactory;
-import software.amazon.kinesis.multilang.MultiLangShardRecordProcessor;
 import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;
 import software.amazon.kinesis.processor.ShardRecordProcessor;
 import org.junit.runner.RunWith;

@@ -23,7 +23,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

-import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.Collections;

@@ -46,9 +45,7 @@ import org.mockito.stubbing.Answer;
 import com.fasterxml.jackson.databind.ObjectMapper;

 import software.amazon.awssdk.services.kinesis.model.Record;
-import software.amazon.kinesis.exceptions.InvalidStateException;
 import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException;
-import software.amazon.kinesis.exceptions.ShutdownException;
 import software.amazon.kinesis.exceptions.ThrottlingException;
 import software.amazon.kinesis.lifecycle.events.InitializationInput;
 import software.amazon.kinesis.lifecycle.events.LeaseLostInput;

@@ -67,7 +64,7 @@ import software.amazon.kinesis.retrieval.KinesisClientRecord;
 @RunWith(MockitoJUnitRunner.class)
 public class StreamingShardRecordProcessorTest {

-    private static final String shardId = "shard-123";
+    private static final String SHARD_ID = "shard-123";

     private int systemExitCount = 0;

@@ -79,77 +76,73 @@ public class StreamingShardRecordProcessorTest {
     private RecordProcessorCheckpointer unimplementedCheckpointer = new RecordProcessorCheckpointer() {

         @Override
-        public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException,
-                ThrottlingException, ShutdownException {
+        public void checkpoint() throws KinesisClientLibDependencyException, ThrottlingException {
             throw new UnsupportedOperationException();
         }

         @Override
         public void checkpoint(String sequenceNumber) throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
+                ThrottlingException, IllegalArgumentException {
             throw new UnsupportedOperationException();
         }

         @Override
         public void checkpoint(Record record)
-                throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException {
+                throws KinesisClientLibDependencyException, ThrottlingException {
             throw new UnsupportedOperationException();
         }

         @Override
         public void checkpoint(String sequenceNumber, long subSequenceNumber)
-                throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException,
-                IllegalArgumentException {
+                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
             throw new UnsupportedOperationException();
         }

         @Override
         public PreparedCheckpointer prepareCheckpoint()
-                throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException {
+                throws KinesisClientLibDependencyException, ThrottlingException {
             throw new UnsupportedOperationException();
         }

         @Override
-        public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
+        public PreparedCheckpointer prepareCheckpoint(byte[] applicationState)
+                throws KinesisClientLibDependencyException, ThrottlingException {
             throw new UnsupportedOperationException();
         }

         @Override
         public PreparedCheckpointer prepareCheckpoint(Record record)
-                throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException {
+                throws KinesisClientLibDependencyException, ThrottlingException {
             throw new UnsupportedOperationException();
         }

         @Override
-        public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
+        public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState)
+                throws KinesisClientLibDependencyException, ThrottlingException {
             throw new UnsupportedOperationException();
         }

         @Override
         public PreparedCheckpointer prepareCheckpoint(String sequenceNumber)
-                throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
+                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
             throw new UnsupportedOperationException();
         }

         @Override
-        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
+        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState)
+                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
             return null;
         }

         @Override
         public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber)
-                throws KinesisClientLibDependencyException,
-                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
+                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
             throw new UnsupportedOperationException();
         }

         @Override
-        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
+        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState)
+                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
             throw new UnsupportedOperationException();
         }

@@ -171,7 +164,7 @@ public class StreamingShardRecordProcessorTest {
     private MultiLangDaemonConfiguration configuration;

     @Before
-    public void prepare() throws IOException, InterruptedException, ExecutionException {
+    public void prepare() throws InterruptedException, ExecutionException {
         // Fake command
         systemExitCount = 0;

@@ -230,7 +223,7 @@ public class StreamingShardRecordProcessorTest {

         List<KinesisClientRecord> testRecords = Collections.emptyList();

-        recordProcessor.initialize(InitializationInput.builder().shardId(shardId).build());
+        recordProcessor.initialize(InitializationInput.builder().shardId(SHARD_ID).build());
         recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords)
                 .checkpointer(unimplementedCheckpointer).build());
         recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords)

@@ -240,7 +233,6 @@ public class StreamingShardRecordProcessorTest {

     @Test
     public void processorPhasesTest() throws InterruptedException, ExecutionException {
-
         Answer<StatusMessage> answer = new Answer<StatusMessage>() {

             StatusMessage[] answers = new StatusMessage[] { new StatusMessage(InitializeMessage.ACTION),

@@ -263,7 +255,7 @@ public class StreamingShardRecordProcessorTest {

         verify(messageWriter)
                 .writeInitializeMessage(argThat(Matchers.withInit(
-                        InitializationInput.builder().shardId(shardId).build())));
+                        InitializationInput.builder().shardId(SHARD_ID).build())));
         verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class));
         verify(messageWriter).writeLeaseLossMessage(any(LeaseLostInput.class));
     }

@@ -295,7 +287,7 @@ public class StreamingShardRecordProcessorTest {
         phases(answer);

         verify(messageWriter).writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder()
-                .shardId(shardId).build())));
+                .shardId(SHARD_ID).build())));
         verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class));
         verify(messageWriter, never()).writeLeaseLossMessage(any(LeaseLostInput.class));
         Assert.assertEquals(1, systemExitCount);

@ -16,7 +16,6 @@ package software.amazon.kinesis.multilang.config;

  import static org.hamcrest.CoreMatchers.equalTo;
  import static org.hamcrest.CoreMatchers.instanceOf;
- import static org.junit.Assert.assertEquals;
  import static org.junit.Assert.assertThat;

  import java.util.Arrays;

@ -32,11 +31,6 @@ import com.amazonaws.auth.AWSCredentials;
  import com.amazonaws.auth.AWSCredentialsProvider;
  import com.amazonaws.auth.AWSCredentialsProviderChain;

- import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
- import software.amazon.awssdk.auth.credentials.AwsCredentials;
- import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
- import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;

  public class AWSCredentialsProviderPropertyValueDecoderTest {

  private static final String TEST_ACCESS_KEY_ID = "123";
@ -278,10 +278,8 @@ public class KinesisClientLibConfiguratorTest {
  }
  }

- @Test
+ @Test(expected = IllegalArgumentException.class)
  public void testWithMissingCredentialsProvider() {
- thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("A basic set of AWS credentials must be provided");

  String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", "workerId = 123",
  "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n');

@ -305,22 +303,37 @@ public class KinesisClientLibConfiguratorTest {
  assertFalse(config.getWorkerIdentifier().isEmpty());
  }

- @Test
- public void testWithMissingStreamName() {
- thrown.expect(NullPointerException.class);
- thrown.expectMessage("Stream name is required");
- String test = StringUtils.join(new String[] { "applicationName = b",
- "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100" }, '\n');
+ @Test(expected = NullPointerException.class)
+ public void testWithMissingStreamNameAndMissingStreamArn() {
+ String test = StringUtils.join(new String[] {
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123",
+ "failoverTimeMillis = 100" },
+ '\n');
  InputStream input = new ByteArrayInputStream(test.getBytes());

  configurator.getConfiguration(input);
  }

- @Test
+ @Test(expected = IllegalArgumentException.class)
+ public void testWithEmptyStreamNameAndMissingStreamArn() {
+ String test = StringUtils.join(new String[] {
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123",
+ "failoverTimeMillis = 100",
+ "streamName = ",
+ "streamArn = "},
+ '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ configurator.getConfiguration(input);
+ }
+
+ @Test(expected = NullPointerException.class)
  public void testWithMissingApplicationName() {
- thrown.expect(NullPointerException.class);
- thrown.expectMessage("Application name is required");

  String test = StringUtils.join(new String[] { "streamName = a", "AWSCredentialsProvider = " + credentialName1,
  "workerId = 123", "failoverTimeMillis = 100" }, '\n');
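The configurator tests above feed a small properties payload to `KinesisClientLibConfigurator#getConfiguration` and check how missing or empty `streamName`/`streamArn` values are rejected. A minimal standalone sketch of that call pattern follows; the property keys are the ones the tests use, while the credentials-provider value and the no-argument configurator constructor are assumptions made for illustration.

```java
import java.io.ByteArrayInputStream;
import java.io.InputStream;

import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;

// Illustrative sketch only, mirroring the test pattern above.
public class ConfiguratorSketch {
    public static void main(String[] args) {
        // Same property keys the tests exercise; the provider name and ARN are placeholders.
        String properties = String.join("\n",
                "applicationName = b",
                "streamArn = arn:aws:kinesis:us-east-1:123456789012:stream/a",
                "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain",
                "workerId = 123",
                "failoverTimeMillis = 100");

        InputStream input = new ByteArrayInputStream(properties.getBytes());
        // getConfiguration(...) is the call under test above; it throws when required
        // values (application name, stream name or ARN, credentials) are absent.
        System.out.println(new KinesisClientLibConfigurator().getConfiguration(input));
    }
}
```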
@ -26,13 +26,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
  import software.amazon.kinesis.lifecycle.events.InitializationInput;
  import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
  import software.amazon.kinesis.lifecycle.ShutdownReason;
- import software.amazon.kinesis.multilang.messages.CheckpointMessage;
- import software.amazon.kinesis.multilang.messages.InitializeMessage;
- import software.amazon.kinesis.multilang.messages.Message;
- import software.amazon.kinesis.multilang.messages.ProcessRecordsMessage;
- import software.amazon.kinesis.multilang.messages.ShutdownMessage;
- import software.amazon.kinesis.multilang.messages.ShutdownRequestedMessage;
- import software.amazon.kinesis.multilang.messages.StatusMessage;
  import software.amazon.kinesis.retrieval.KinesisClientRecord;

  public class MessageTest {

@ -56,7 +49,7 @@ public class MessageTest {
  new ProcessRecordsMessage(),
  new ShutdownRequestedMessage(),
  new LeaseLostMessage(),
- new ShardEndedMessage()
+ new ShardEndedMessage(),
  };

  // TODO: fix this
@ -22,7 +22,7 @@
  <parent>
  <groupId>software.amazon.kinesis</groupId>
  <artifactId>amazon-kinesis-client-pom</artifactId>
- <version>2.5.1-SNAPSHOT</version>
+ <version>2.5.1</version>
  </parent>

  <artifactId>amazon-kinesis-client</artifactId>

@ -89,7 +89,7 @@
  <dependency>
  <groupId>com.google.guava</groupId>
  <artifactId>guava</artifactId>
- <version>31.1-jre</version>
+ <version>32.0.0-jre</version>
  </dependency>
  <dependency>
  <groupId>com.google.protobuf</groupId>
@ -144,7 +144,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi
  * {@inheritDoc}
  */
  @Override
- public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
+ public PreparedCheckpointer prepareCheckpoint(byte[] applicationState)
+ throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
  return prepareCheckpoint(largestPermittedCheckpointValue.sequenceNumber(), applicationState);
  }

@ -152,7 +153,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi
  * {@inheritDoc}
  */
  @Override
- public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
+ public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState)
+ throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
  //
  // TODO: UserRecord Deprecation
  //
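The two hunks above only re-wrap the `prepareCheckpoint` signatures, but they are a useful reminder of the two-phase checkpoint flow these methods implement. A minimal sketch of that flow from inside a record processor follows; the broad exception handling is a simplification for illustration, not the library's recommended error handling.

```java
import java.nio.charset.StandardCharsets;

import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.processor.PreparedCheckpointer;

// Sketch of two-phase checkpointing: prepareCheckpoint(...) persists a pending checkpoint
// (plus opaque application state), and checkpoint() on the returned PreparedCheckpointer
// promotes it once the processor has finished its work.
public class TwoPhaseCheckpointSketch {
    static void prepareThenCheckpoint(ProcessRecordsInput input) {
        byte[] applicationState = "in-progress".getBytes(StandardCharsets.UTF_8);
        try {
            PreparedCheckpointer prepared = input.checkpointer().prepareCheckpoint(applicationState);
            // ... perform the work that must complete before the checkpoint becomes final ...
            prepared.checkpoint();
        } catch (Exception e) {
            // Real code should handle the specific KCL checkpoint exceptions
            // (dependency, throttling, invalid-state, shutdown) and retry where appropriate.
        }
    }
}
```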
@ -103,7 +103,8 @@ public class DynamoDBCheckpointer implements Checkpointer {
  }

  @Override
- public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) throws KinesisClientLibException {
+ public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken,
+ byte[] pendingCheckpointState) throws KinesisClientLibException {
  try {
  boolean wasSuccessful =
  prepareCheckpoint(leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState);
@ -23,10 +23,11 @@ import software.amazon.awssdk.services.kinesis.model.HashKeyRange;

  import java.math.BigInteger;

- @Value @Accessors(fluent = true)
  /**
  * Lease POJO to hold the starting hashkey range and ending hashkey range of kinesis shards.
  */
+ @Accessors(fluent = true)
+ @Value
  public class HashKeyRangeForLease {

  private final BigInteger startingHashKey;
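The hunk above only moves the Lombok annotations below the class javadoc and splits them onto separate lines. For readers unfamiliar with the pair, here is a hypothetical mini-example (not a class from this repository) of what `@Value` plus `@Accessors(fluent = true)` generate: an immutable value class whose accessors drop the `get` prefix, which is why call sites elsewhere in this diff read `lease.hashKeyRangeForLease().serializedStartingHashKey()`.

```java
import lombok.Value;
import lombok.experimental.Accessors;

// Hypothetical example class, for illustration only.
@Accessors(fluent = true)
@Value
class ExampleRange {
    String startingHashKey;
    String endingHashKey;
}

class ExampleRangeUsage {
    public static void main(String[] args) {
        // @Value generates the all-args constructor and makes the fields final.
        ExampleRange range = new ExampleRange("0", "340282366920938463463374607431768211455");
        // @Accessors(fluent = true) generates startingHashKey() instead of getStartingHashKey().
        System.out.println(range.startingHashKey());
    }
}
```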
@ -167,12 +167,22 @@ public class StreamIdentifier {
  .build();
  }

- private static void validateArn(Arn streamArn) {
+ /**
+ * Verify the streamArn follows the appropriate formatting.
+ * Throw an exception if it does not.
+ * @param streamArn
+ */
+ public static void validateArn(Arn streamArn) {
  if (!STREAM_ARN_PATTERN.matcher(streamArn.toString()).matches() || !streamArn.region().isPresent()) {
- throw new IllegalArgumentException("Unable to create a StreamIdentifier from " + streamArn);
+ throw new IllegalArgumentException("Invalid streamArn " + streamArn);
  }
  }

+ /**
+ * Verify creationEpoch is greater than 0.
+ * Throw an exception if it is not.
+ * @param creationEpoch
+ */
  private static void validateCreationEpoch(long creationEpoch) {
  if (creationEpoch <= 0) {
  throw new IllegalArgumentException(
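With `validateArn` widened from private to public in the hunk above, callers outside the class can validate a stream ARN up front. A minimal sketch follows, assuming the AWS SDK v2 `Arn` type that the method signature uses; the ARN string itself is a made-up placeholder.

```java
import software.amazon.awssdk.arns.Arn;
import software.amazon.kinesis.common.StreamIdentifier;

public class ValidateArnSketch {
    public static void main(String[] args) {
        // Placeholder region, account id, and stream name.
        Arn streamArn = Arn.fromString("arn:aws:kinesis:us-west-2:123456789012:stream/my-stream");
        // Throws IllegalArgumentException ("Invalid streamArn ...") when the ARN is not a
        // well-formed Kinesis stream ARN with a region present.
        StreamIdentifier.validateArn(streamArn);
    }
}
```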
@ -348,7 +348,7 @@ class PeriodicShardSyncManager {
  ((MultiStreamLease) lease).shardId() :
  lease.leaseKey();
  final Shard shard = kinesisShards.get(shardId);
- if(shard == null) {
+ if (shard == null) {
  return lease;
  }
  lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange()));

@ -372,7 +372,7 @@ class PeriodicShardSyncManager {
  List<Lease> leasesWithHashKeyRanges) {
  // Sort the hash ranges by starting hash key.
  List<Lease> sortedLeasesWithHashKeyRanges = sortLeasesByHashRange(leasesWithHashKeyRanges);
- if(sortedLeasesWithHashKeyRanges.isEmpty()) {
+ if (sortedLeasesWithHashKeyRanges.isEmpty()) {
  log.error("No leases with valid hashranges found for stream {}", streamIdentifier);
  return Optional.of(new HashRangeHole());
  }

@ -417,8 +417,9 @@ class PeriodicShardSyncManager {

  @VisibleForTesting
  static List<Lease> sortLeasesByHashRange(List<Lease> leasesWithHashKeyRanges) {
- if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1)
+ if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1) {
  return leasesWithHashKeyRanges;
+ }
  Collections.sort(leasesWithHashKeyRanges, new HashKeyRangeComparator());
  return leasesWithHashKeyRanges;
  }

@ -544,7 +544,8 @@ public class Scheduler implements Runnable {
  final Map<Boolean, Set<StreamIdentifier>> staleStreamIdDeletionDecisionMap = staleStreamDeletionMap.keySet().stream().collect(Collectors
  .partitioningBy(newStreamConfigMap::containsKey, Collectors.toSet()));
  final Set<StreamIdentifier> staleStreamIdsToBeDeleted = staleStreamIdDeletionDecisionMap.get(false).stream().filter(streamIdentifier ->
- Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()).toMillis() >= waitPeriodToDeleteOldStreams.toMillis()).collect(Collectors.toSet());
+ Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()).toMillis() >= waitPeriodToDeleteOldStreams.toMillis())
+ .collect(Collectors.toSet());
  // These are the streams which are deleted in Kinesis and we encounter resource not found during
  // shardSyncTask. This is applicable in MultiStreamMode only, in case of SingleStreamMode, store will
  // not have any data.

@ -611,7 +612,7 @@ public class Scheduler implements Runnable {
  }

  private void removeStreamsFromStaleStreamsList(Set<StreamIdentifier> streamIdentifiers) {
- for(StreamIdentifier streamIdentifier : streamIdentifiers) {
+ for (StreamIdentifier streamIdentifier : streamIdentifiers) {
  staleStreamDeletionMap.remove(streamIdentifier);
  }
  }
@ -80,7 +80,7 @@ public class HierarchicalShardSyncer {

  private static final String MIN_HASH_KEY = BigInteger.ZERO.toString();
  private static final String MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE).toString();
- private static final int retriesForCompleteHashRange = 3;
+ private static final int RETRIES_FOR_COMPLETE_HASH_RANGE = 3;

  private static final long DELAY_BETWEEN_LIST_SHARDS_MILLIS = 1000;

@ -98,7 +98,7 @@ public class HierarchicalShardSyncer {
  this.deletedStreamListProvider = deletedStreamListProvider;
  }

- private static final BiFunction<Lease, MultiStreamArgs, String> shardIdFromLeaseDeducer =
+ private static final BiFunction<Lease, MultiStreamArgs, String> SHARD_ID_FROM_LEASE_DEDUCER =
  (lease, multiStreamArgs) ->
  multiStreamArgs.isMultiStreamMode() ?
  ((MultiStreamLease) lease).shardId() :

@ -129,7 +129,9 @@ public class HierarchicalShardSyncer {
  isLeaseTableEmpty);
  }

- //Provide a pre-collcted list of shards to avoid calling ListShards API
+ /**
+ * Provide a pre-collected list of shards to avoid calling ListShards API
+ */
  public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector,
  final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition,
  List<Shard> latestShards, final boolean ignoreUnexpectedChildShards, final MetricsScope scope, final boolean isLeaseTableEmpty)

@ -163,7 +165,7 @@ public class HierarchicalShardSyncer {
  final long startTime = System.currentTimeMillis();
  boolean success = false;
  try {
- if(leaseRefresher.createLeaseIfNotExists(lease)) {
+ if (leaseRefresher.createLeaseIfNotExists(lease)) {
  createdLeases.add(lease);
  }
  success = true;

@ -268,7 +270,7 @@ public class HierarchicalShardSyncer {

  List<Shard> shards;

- for (int i = 0; i < retriesForCompleteHashRange; i++) {
+ for (int i = 0; i < RETRIES_FOR_COMPLETE_HASH_RANGE; i++) {
  shards = shardDetector.listShardsWithFilter(shardFilter);

  if (shards == null) {

@ -284,7 +286,7 @@ public class HierarchicalShardSyncer {
  }

  throw new KinesisClientLibIOException("Hash range of shards returned for " + streamName + " was incomplete after "
- + retriesForCompleteHashRange + " retries.");
+ + RETRIES_FOR_COMPLETE_HASH_RANGE + " retries.");
  }

  private List<Shard> getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException {

@ -365,7 +367,8 @@ public class HierarchicalShardSyncer {
  * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard
  */
  static List<Lease> determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List<Shard> shards,
- final List<Lease> currentLeases, final InitialPositionInStreamExtended initialPosition,final Set<String> inconsistentShardIds) {
+ final List<Lease> currentLeases, final InitialPositionInStreamExtended initialPosition,
+ final Set<String> inconsistentShardIds) {
  return determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds,
  new MultiStreamArgs(false, null));
  }
@ -499,11 +502,13 @@ public class HierarchicalShardSyncer {
  if (descendantParentShardIds.contains(parentShardId)
  && !initialPosition.getInitialPositionInStream()
  .equals(InitialPositionInStream.AT_TIMESTAMP)) {
- log.info("Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}", lease.leaseKey(), lease.checkpoint());
+ log.info("Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}",
+ lease.leaseKey(), lease.checkpoint());
  lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
  } else {
  final ExtendedSequenceNumber newCheckpoint = convertToCheckpoint(initialPosition);
- log.info("Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}", lease.leaseKey(), newCheckpoint, lease.checkpoint());
+ log.info("Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}",
+ lease.leaseKey(), newCheckpoint, lease.checkpoint());
  lease.checkpoint(newCheckpoint);
  }
  }

@ -728,8 +733,8 @@ public class HierarchicalShardSyncer {
  @Override
  public int compare(final Lease lease1, final Lease lease2) {
  int result = 0;
- final String shardId1 = shardIdFromLeaseDeducer.apply(lease1, multiStreamArgs);
- final String shardId2 = shardIdFromLeaseDeducer.apply(lease2, multiStreamArgs);
+ final String shardId1 = SHARD_ID_FROM_LEASE_DEDUCER.apply(lease1, multiStreamArgs);
+ final String shardId2 = SHARD_ID_FROM_LEASE_DEDUCER.apply(lease2, multiStreamArgs);
  final Shard shard1 = shardIdToShardMap.get(shardId1);
  final Shard shard2 = shardIdToShardMap.get(shardId2);

@ -802,7 +807,7 @@ public class HierarchicalShardSyncer {
  final Map<String, Shard> shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards);

  currentLeases.stream().peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease))
- .map(lease -> shardIdFromLeaseDeducer.apply(lease, multiStreamArgs))
+ .map(lease -> SHARD_ID_FROM_LEASE_DEDUCER.apply(lease, multiStreamArgs))
  .collect(Collectors.toSet());

  final List<Lease> newLeasesToCreate = getLeasesToCreateForOpenAndClosedShards(initialPosition, shards, multiStreamArgs, streamIdentifier);

@ -908,7 +913,7 @@ public class HierarchicalShardSyncer {
  .map(streamId -> streamId.serialize()).orElse("");
  final Set<String> shardIdsOfCurrentLeases = currentLeases.stream()
  .peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease))
- .map(lease -> shardIdFromLeaseDeducer.apply(lease, multiStreamArgs))
+ .map(lease -> SHARD_ID_FROM_LEASE_DEDUCER.apply(lease, multiStreamArgs))
  .collect(Collectors.toSet());

  final List<Shard> openShards = getOpenShards(shards, streamIdentifier);
@ -179,7 +179,7 @@ public class LeaseCleanupManager {
  try {
  if (cleanupLeasesUponShardCompletion && timeToCheckForCompletedShard) {
  final Lease leaseFromDDB = leaseCoordinator.leaseRefresher().getLease(lease.leaseKey());
- if(leaseFromDDB != null) {
+ if (leaseFromDDB != null) {
  Set<String> childShardKeys = leaseFromDDB.childShardIds();
  if (CollectionUtils.isNullOrEmpty(childShardKeys)) {
  try {

@ -310,7 +310,7 @@ public class LeaseManagementConfig {
  private LeaseManagementFactory leaseManagementFactory;

  public HierarchicalShardSyncer hierarchicalShardSyncer() {
- if(hierarchicalShardSyncer == null) {
+ if (hierarchicalShardSyncer == null) {
  hierarchicalShardSyncer = new HierarchicalShardSyncer();
  }
  return hierarchicalShardSyncer;

@ -356,7 +356,7 @@ public class LeaseManagementConfig {
  * @return LeaseManagementFactory
  */
  public LeaseManagementFactory leaseManagementFactory(final LeaseSerializer leaseSerializer, boolean isMultiStreamingMode) {
- if(leaseManagementFactory == null) {
+ if (leaseManagementFactory == null) {
  leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(),
  dynamoDBClient(),
  tableName(),

@ -23,8 +23,6 @@ import lombok.Setter;
  import lombok.experimental.Accessors;
  import org.apache.commons.lang3.Validate;

- import java.util.Objects;
-
  import static com.google.common.base.Verify.verifyNotNull;

  @Setter

@ -37,7 +37,7 @@ import software.amazon.kinesis.metrics.MetricsUtil;
  @Slf4j
  @KinesisClientInternalApi
  public class ShardSyncTask implements ConsumerTask {
- private final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask";
+ private static final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask";

  @NonNull
  private final ShardDetector shardDetector;
@ -329,8 +329,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {

  @Override
  public void stopLeaseTaker() {
+ if (takerFuture != null) {
  takerFuture.cancel(false);
+ }
  }

  @Override
@ -187,7 +187,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
  public boolean createLeaseTableIfNotExists(@NonNull final Long readCapacity, @NonNull final Long writeCapacity)
  throws ProvisionedThroughputException, DependencyException {
  final CreateTableRequest.Builder builder = createTableRequestBuilder();
- if(BillingMode.PROVISIONED.equals(billingMode)) {
+ if (BillingMode.PROVISIONED.equals(billingMode)) {
  ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(readCapacity)
  .writeCapacityUnits(writeCapacity).build();
  builder.provisionedThroughput(throughput);

@ -467,7 +467,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
  } catch (DynamoDbException | TimeoutException e) {
  throw convertAndRethrowExceptions("create", lease.leaseKey(), e);
  }
- log.info("Created lease: {}",lease);
+ log.info("Created lease: {}", lease);
  return true;
  }

@ -89,7 +89,7 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer {
  result.put(PENDING_CHECKPOINT_STATE_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()));
  }

- if(lease.hashKeyRangeForLease() != null) {
+ if (lease.hashKeyRangeForLease() != null) {
  result.put(STARTING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey()));
  result.put(ENDING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey()));
  }

@ -274,7 +274,7 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer {
  result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds())));
  }

- if(lease.hashKeyRangeForLease() != null) {
+ if (lease.hashKeyRangeForLease() != null) {
  result.put(STARTING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey())));
  result.put(ENDING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey())));
  }

@ -19,9 +19,9 @@ package software.amazon.kinesis.leases.exceptions;
  */
  public class CustomerApplicationException extends Exception {

- public CustomerApplicationException(Throwable e) { super(e);}
+ public CustomerApplicationException(Throwable e) { super(e); }

- public CustomerApplicationException(String message, Throwable e) { super(message, e);}
+ public CustomerApplicationException(String message, Throwable e) { super(message, e); }

- public CustomerApplicationException(String message) { super(message);}
+ public CustomerApplicationException(String message) { super(message); }
  }
@ -212,8 +212,10 @@ public class ProcessTask implements ConsumerTask {
  log.debug("Calling application processRecords() with {} records from {}", records.size(),
  shardInfoId);

- final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records).cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime())
- .isAtShardEnd(input.isAtShardEnd()).checkpointer(recordProcessorCheckpointer).millisBehindLatest(input.millisBehindLatest()).build();
+ final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records)
+ .cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime())
+ .isAtShardEnd(input.isAtShardEnd()).checkpointer(recordProcessorCheckpointer)
+ .millisBehindLatest(input.millisBehindLatest()).build();

  final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION);
  shardInfo.streamIdentifierSerOpt()
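The hunk above only re-wraps how ProcessTask assembles the `ProcessRecordsInput` handed to the application, but it lists every accessor a processor can rely on. A minimal consumer-side sketch follows; the fluent accessor names come from this diff, while decoding the record payload as UTF-8 is an assumption made for the example.

```java
import java.nio.charset.StandardCharsets;

import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.retrieval.KinesisClientRecord;

// Sketch of reading the input that ProcessTask builds above.
public class ProcessRecordsInputSketch {
    static void handle(ProcessRecordsInput input) {
        System.out.println("millisBehindLatest=" + input.millisBehindLatest()
                + ", atShardEnd=" + input.isAtShardEnd());
        for (KinesisClientRecord record : input.records()) {
            // data() is a ByteBuffer; copy it out before decoding.
            byte[] payload = new byte[record.data().remaining()];
            record.data().get(payload);
            System.out.println(record.sequenceNumber() + ": " + new String(payload, StandardCharsets.UTF_8));
        }
    }
}
```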
@ -61,7 +61,7 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
  @Deprecated
  ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,
  ShardConsumer shardConsumer) {
- this(recordsPublisher,executorService,bufferSize,shardConsumer, LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
+ this(recordsPublisher, executorService, bufferSize, shardConsumer, LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
  }

  ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,

@ -74,7 +74,6 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
  this.shardInfoId = ShardInfo.getLeaseKey(shardConsumer.shardInfo());
  }

-
  void startSubscriptions() {
  synchronized (lockObject) {
  // Setting the lastRequestTime to allow for health checks to restart subscriptions if they failed to

@ -131,7 +130,9 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
  Duration timeSinceLastResponse = Duration.between(lastRequestTime, now);
  if (timeSinceLastResponse.toMillis() > maxTimeBetweenRequests) {
  log.error(
+ // CHECKSTYLE.OFF: LineLength
  "{}: Last request was dispatched at {}, but no response as of {} ({}). Cancelling subscription, and restarting. Last successful request details -- {}",
+ // CHECKSTYLE.ON: LineLength
  shardInfoId, lastRequestTime, now, timeSinceLastResponse, recordsPublisher.getLastSuccessfulRequestDetails());
  cancel();

@ -283,7 +283,7 @@ public class ShutdownTask implements ConsumerTask {
  }
  }

- for(ChildShard childShard : childShards) {
+ for (ChildShard childShard : childShards) {
  final String leaseKey = ShardInfo.getLeaseKey(shardInfo, childShard.shardId());
  if (leaseRefresher.getLease(leaseKey) == null) {
  log.debug("{} - Shard {} - Attempting to create lease for child shard {}", shardDetector.streamIdentifier(), shardInfo.shardId(), leaseKey);
@ -20,9 +20,7 @@ import java.util.Objects;
  import software.amazon.awssdk.services.cloudwatch.model.Dimension;
  import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;

-
- /*
+ /**
  * A representation of a key of a MetricDatum. This class is useful when wanting to compare
  * whether 2 keys have the same MetricDatum. This feature will be used in MetricAccumulatingQueue
  * where we aggregate metrics across multiple MetricScopes.

@ -48,12 +46,15 @@ public class CloudWatchMetricKey {

  @Override
  public boolean equals(Object obj) {
- if (this == obj)
+ if (this == obj) {
  return true;
- if (obj == null)
+ }
+ if (obj == null) {
  return false;
- if (getClass() != obj.getClass())
+ }
+ if (getClass() != obj.getClass()) {
  return false;
+ }
  CloudWatchMetricKey other = (CloudWatchMetricKey) obj;
  return Objects.equals(other.dimensions, dimensions) && Objects.equals(other.metricName, metricName);
  }

@ -15,7 +15,6 @@
  package software.amazon.kinesis.metrics;

  import lombok.AllArgsConstructor;
- import lombok.Data;
  import lombok.Setter;
  import lombok.experimental.Accessors;
  import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;

@ -36,7 +35,6 @@ import java.util.Objects;
  *
  * MetricDatumWithKey<SampleMetricKey> sampleDatumWithKey = new MetricDatumWithKey<SampleMetricKey>(new
  * SampleMetricKey(System.currentTimeMillis()), datum)
- *
  */
  @AllArgsConstructor
  @Setter

@ -59,12 +57,15 @@ public class MetricDatumWithKey<KeyType> {

  @Override
  public boolean equals(Object obj) {
- if (this == obj)
+ if (this == obj) {
  return true;
- if (obj == null)
+ }
+ if (obj == null) {
  return false;
- if (getClass() != obj.getClass())
+ }
+ if (getClass() != obj.getClass()) {
  return false;
+ }
  MetricDatumWithKey<?> other = (MetricDatumWithKey<?>) obj;
  return Objects.equals(other.key, key) && Objects.equals(other.datum, datum);
  }
@ -49,7 +49,7 @@ public class RetrievalConfig {
  */
  public static final String KINESIS_CLIENT_LIB_USER_AGENT = "amazon-kinesis-client-library-java";

- public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.5.1-SNAPSHOT";
+ public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.5.1";

  /**
  * Client used to make calls to Kinesis for records retrieval

@ -152,7 +152,7 @@ public class RetrievalConfig {
  if (streamTracker().isMultiStream()) {
  throw new IllegalArgumentException(
  "Cannot set initialPositionInStreamExtended when multiStreamTracker is set");
- };
+ }

  final StreamIdentifier streamIdentifier = getSingleStreamIdentifier();
  final StreamConfig updatedConfig = new StreamConfig(streamIdentifier, initialPositionInStreamExtended);
@ -27,14 +27,12 @@ import org.reactivestreams.Subscriber;
  import org.reactivestreams.Subscription;
  import software.amazon.awssdk.core.async.SdkPublisher;
  import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
- import software.amazon.awssdk.services.kinesis.model.ChildShard;
  import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
  import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
  import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;
  import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
  import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponse;
  import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
- import software.amazon.awssdk.utils.CollectionUtils;
  import software.amazon.awssdk.utils.Either;
  import software.amazon.kinesis.annotations.KinesisClientInternalApi;
  import software.amazon.kinesis.common.InitialPositionInStreamExtended;

@ -117,7 +115,6 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  this.currentSequenceNumber = extendedSequenceNumber.sequenceNumber();
  this.isFirstConnection = true;
  }

  }
-
  @Override

@ -192,7 +189,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  // Take action based on the time spent by the event in queue.
  takeDelayedDeliveryActionIfRequired(streamAndShardId, recordsRetrievedContext.getEnqueueTimestamp(), log);
  // Update current sequence number for the successfully delivered event.
- currentSequenceNumber = ((FanoutRecordsRetrieved)recordsRetrieved).continuationSequenceNumber();
+ currentSequenceNumber = ((FanoutRecordsRetrieved) recordsRetrieved).continuationSequenceNumber();
  // Update the triggering flow for post scheduling upstream request.
  flowToBeReturned = recordsRetrievedContext.getRecordFlow();
  // Try scheduling the next event in the queue or execute the subscription shutdown action.

@ -206,7 +203,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  if (flow != null && recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()
  .equals(flow.getSubscribeToShardId())) {
  log.error(
- "{}: Received unexpected ack for the active subscription {}. Throwing. ", streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier());
+ "{}: Received unexpected ack for the active subscription {}. Throwing.",
+ streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier());
  throw new IllegalStateException("Unexpected ack for the active subscription");
  }
  // Otherwise publisher received a stale ack.

@ -275,7 +273,6 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  SubscriptionShutdownEvent(Runnable subscriptionShutdownAction, String eventIdentifier) {
  this(subscriptionShutdownAction, eventIdentifier, null);
  }

  }
-
  private boolean hasValidSubscriber() {

@ -315,7 +312,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  synchronized (lockObject) {

  if (!hasValidSubscriber()) {
- if(hasValidFlow()) {
+ if (hasValidFlow()) {
  log.warn(
  "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null." +
  " Last successful request details -- {}", streamAndShardId, flow.connectionStartedAt,

@ -335,7 +332,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  if (flow != null) {
  String logMessage = String.format(
  "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s." +
- " Last successful request details -- %s", streamAndShardId, flow.connectionStartedAt, flow.subscribeToShardId, category.throwableTypeString, lastSuccessfulRequestDetails);
+ " Last successful request details -- %s", streamAndShardId, flow.connectionStartedAt,
+ flow.subscribeToShardId, category.throwableTypeString, lastSuccessfulRequestDetails);
  switch (category.throwableType) {
  case READ_TIMEOUT:
  log.debug(logMessage, propagationThrowable);

@ -367,7 +365,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  } else {
  if (triggeringFlow != null) {
  log.debug(
+ // CHECKSTYLE.OFF: LineLength
  "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- {} -> triggeringFlow wasn't the active flow. Didn't dispatch error",
+ // CHECKSTYLE.ON: LineLength
  streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId,
  category.throwableTypeString);
  triggeringFlow.cancel();

@ -603,7 +603,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
  synchronized (lockObject) {
  if (subscriber != s) {
  log.warn(
+ // CHECKSTYLE.OFF: LineLength
  "{}: (FanOutRecordsPublisher/Subscription#request) - Rejected an attempt to request({}), because subscribers don't match. Last successful request details -- {}",
+ // CHECKSTYLE.ON: LineLength
  streamAndShardId, n, lastSuccessfulRequestDetails);
  return;
  }
@ -630,13 +632,17 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
|
||||||
synchronized (lockObject) {
|
synchronized (lockObject) {
|
||||||
if (subscriber != s) {
|
if (subscriber != s) {
|
||||||
log.warn(
|
log.warn(
|
||||||
|
// CHECKSTYLE.OFF: LineLength
|
||||||
"{}: (FanOutRecordsPublisher/Subscription#cancel) - Rejected attempt to cancel subscription, because subscribers don't match. Last successful request details -- {}",
|
"{}: (FanOutRecordsPublisher/Subscription#cancel) - Rejected attempt to cancel subscription, because subscribers don't match. Last successful request details -- {}",
|
||||||
|
// CHECKSTYLE.ON: LineLength
|
||||||
streamAndShardId, lastSuccessfulRequestDetails);
|
streamAndShardId, lastSuccessfulRequestDetails);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (!hasValidSubscriber()) {
|
if (!hasValidSubscriber()) {
|
||||||
log.warn(
|
log.warn(
|
||||||
|
// CHECKSTYLE.OFF: LineLength
|
||||||
"{}: (FanOutRecordsPublisher/Subscription#cancel) - Cancelled called even with an invalid subscriber. Last successful request details -- {}",
|
"{}: (FanOutRecordsPublisher/Subscription#cancel) - Cancelled called even with an invalid subscriber. Last successful request details -- {}",
|
||||||
|
// CHECKSTYLE.ON: LineLength
|
||||||
streamAndShardId, lastSuccessfulRequestDetails);
|
streamAndShardId, lastSuccessfulRequestDetails);
|
||||||
}
|
}
|
||||||
subscriber = null;
|
subscriber = null;
|
||||||
|
|
@ -778,7 +784,11 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
|
||||||
executeExceptionOccurred(throwable);
|
executeExceptionOccurred(throwable);
|
||||||
} else {
|
} else {
|
||||||
final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent(
|
final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent(
|
||||||
() -> {parent.recordsDeliveryQueue.poll(); executeExceptionOccurred(throwable);}, "onError", throwable);
|
() -> {
|
||||||
|
parent.recordsDeliveryQueue.poll();
|
||||||
|
executeExceptionOccurred(throwable);
|
||||||
|
},
|
||||||
|
"onError", throwable);
|
||||||
tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent);
|
tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -786,7 +796,6 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
|
||||||
|
|
||||||
private void executeExceptionOccurred(Throwable throwable) {
|
private void executeExceptionOccurred(Throwable throwable) {
|
||||||
synchronized (parent.lockObject) {
|
synchronized (parent.lockObject) {
|
||||||
|
|
||||||
log.debug("{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}",
|
log.debug("{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}",
|
||||||
parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(),
|
parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(),
|
||||||
throwable.getMessage());
|
throwable.getMessage());
|
||||||
|
|
@@ -803,7 +812,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                     isErrorDispatched = true;
                 } else {
                     log.debug(
+                            // CHECKSTYLE.OFF: LineLength
                             "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- An error has previously been dispatched, not dispatching this error {}: {}",
+                            // CHECKSTYLE.OFF: LineLength
                             parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(),
                             throwable.getMessage());
                 }
@@ -817,7 +828,11 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                 executeComplete();
             } else {
                 final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent(
-                        () -> {parent.recordsDeliveryQueue.poll(); executeComplete();}, "onComplete");
+                        () -> {
+                            parent.recordsDeliveryQueue.poll();
+                            executeComplete();
+                        },
+                        "onComplete");
                 tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent);
             }
         }
@@ -830,7 +845,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                     .add(new RecordsRetrievedContext(Either.right(subscriptionShutdownEvent), this, Instant.now()));
         } catch (Exception e) {
             log.warn(
+                    // CHECKSTYLE.OFF: LineLength
                     "{}: Unable to enqueue the {} shutdown event due to capacity restrictions in delivery queue with remaining capacity {}. Ignoring. Last successful request details -- {}",
+                    // CHECKSTYLE.ON: LineLength
                     parent.streamAndShardId, subscriptionShutdownEvent.getEventIdentifier(), parent.recordsDeliveryQueue.remainingCapacity(),
                     parent.lastSuccessfulRequestDetails, subscriptionShutdownEvent.getShutdownEventThrowableOptional());
         }
@@ -854,7 +871,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
             }
             if (this.isDisposed) {
                 log.warn(
+                        // CHECKSTYLE.OFF: LineLength
                         "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- This flow has been disposed not dispatching completion. Last successful request details -- {}",
+                        // CHECKSTYLE.ON: LineLength
                         parent.streamAndShardId, connectionStartedAt, subscribeToShardId, parent.lastSuccessfulRequestDetails);
                 return;
             }
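Several of the hunks above wrap unavoidably long log-format strings in `CHECKSTYLE.OFF` / `CHECKSTYLE.ON` comments rather than splitting the message. Markers like these are only honored if the Checkstyle configuration registers a comment filter whose on/off phrases match them (the default phrases differ), so treat the shape below as an illustration under that assumption, not a quote of this project's checkstyle.xml:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LineLengthSuppressionSketch {
    private static final Logger log = LoggerFactory.getLogger(LineLengthSuppressionSketch.class);

    void warnOnce(String streamAndShardId, Object lastSuccessfulRequestDetails) {
        log.warn(
                // CHECKSTYLE.OFF: LineLength  -- requires a SuppressionCommentFilter (or
                // SuppressWithPlainTextCommentFilter) configured with matching off/on formats
                "{}: a format string that stays clearer on one long line than split across several -- {}",
                // CHECKSTYLE.ON: LineLength
                streamAndShardId, lastSuccessfulRequestDetails);
    }
}
```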
@@ -54,7 +54,7 @@ public class FanOutRetrievalFactory implements RetrievalFactory {
             final StreamConfig streamConfig,
             final MetricsFactory metricsFactory) {
         final Optional<String> streamIdentifierStr = shardInfo.streamIdentifierSerOpt();
-        if(streamIdentifierStr.isPresent()) {
+        if (streamIdentifierStr.isPresent()) {
             final StreamIdentifier streamIdentifier = StreamIdentifier.multiStreamInstance(streamIdentifierStr.get());
             return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(),
                     getOrCreateConsumerArn(streamIdentifier, streamConfig.consumerArn()),
@@ -145,7 +145,9 @@ public class KinesisDataFetcher implements DataFetcher {
         }
     }

+    // CHECKSTYLE.OFF: MemberName
     final DataFetcherResult TERMINAL_RESULT = new DataFetcherResult() {
+        // CHECKSTYLE.ON: MemberName
         @Override
         public GetRecordsResponse getResult() {
             return GetRecordsResponse.builder()
@@ -137,7 +137,7 @@ public class PollingConfig implements RetrievalSpecificConfig {
     @Override
     public RetrievalFactory retrievalFactory() {
         // Prioritize the PollingConfig specified value if its updated.
-        if(usePollingConfigIdleTimeValue) {
+        if (usePollingConfigIdleTimeValue) {
             recordsFetcherFactory.idleMillisBetweenCalls(idleTimeBetweenReadsInMillis);
         }
         return new SynchronousBlockingRetrievalFactory(streamName(), kinesisClient(), recordsFetcherFactory,
@@ -327,7 +327,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
         }
         resetLock.writeLock().lock();
         try {
-            publisherSession.reset((PrefetchRecordsRetrieved)recordsRetrieved);
+            publisherSession.reset((PrefetchRecordsRetrieved) recordsRetrieved);
             wasReset = true;
         } finally {
             resetLock.writeLock().unlock();
@@ -555,7 +555,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
                 return;
             }
             // Add a sleep if lastSuccessfulCall is still null but this is not the first try to avoid retry storm
-            if(lastSuccessfulCall == null) {
+            if (lastSuccessfulCall == null) {
                 Thread.sleep(idleMillisBetweenCalls);
                 return;
             }
@@ -17,7 +17,6 @@ package software.amazon.kinesis.checkpoint;
 import java.util.HashMap;
 import java.util.Map;

-import software.amazon.kinesis.exceptions.KinesisClientLibException;
 import software.amazon.kinesis.processor.Checkpointer;
 import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

@@ -39,8 +38,7 @@ public class InMemoryCheckpointer implements Checkpointer {
     * {@inheritDoc}
     */
    @Override
-    public void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken)
-            throws KinesisClientLibException {
+    public void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken) {
        checkpoints.put(leaseKey, checkpointValue);
        flushpoints.put(leaseKey, checkpointValue);
        pendingCheckpoints.remove(leaseKey);
@@ -49,33 +47,32 @@ public class InMemoryCheckpointer implements Checkpointer {
        if (log.isDebugEnabled()) {
            log.debug("shardId: {} checkpoint: {}", leaseKey, checkpointValue);
        }
-
    }

    /**
     * {@inheritDoc}
     */
    @Override
-    public ExtendedSequenceNumber getCheckpoint(String leaseKey) throws KinesisClientLibException {
+    public ExtendedSequenceNumber getCheckpoint(String leaseKey) {
        ExtendedSequenceNumber checkpoint = flushpoints.get(leaseKey);
        log.debug("checkpoint shardId: {} checkpoint: {}", leaseKey, checkpoint);
        return checkpoint;
    }

    @Override
-    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken)
-            throws KinesisClientLibException {
+    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) {
        prepareCheckpoint(leaseKey, pendingCheckpoint, concurrencyToken, null);
    }

    @Override
-    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) throws KinesisClientLibException {
+    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken,
+            byte[] pendingCheckpointState) {
        pendingCheckpoints.put(leaseKey, pendingCheckpoint);
        pendingCheckpointStates.put(leaseKey, pendingCheckpointState);
    }

    @Override
-    public Checkpoint getCheckpointObject(String leaseKey) throws KinesisClientLibException {
+    public Checkpoint getCheckpointObject(String leaseKey) {
        ExtendedSequenceNumber checkpoint = flushpoints.get(leaseKey);
        ExtendedSequenceNumber pendingCheckpoint = pendingCheckpoints.get(leaseKey);
        byte[] pendingCheckpointState = pendingCheckpointStates.get(leaseKey);
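The `InMemoryCheckpointer` hunks drop `throws KinesisClientLibException` from methods that never actually throw it. Java allows an overriding method to declare fewer (or no) checked exceptions than the interface it implements, so the change compiles cleanly and callers of the concrete class no longer need a catch block. A minimal sketch with hypothetical types:

```java
class StorageException extends Exception {
}

interface Store {
    // The interface may declare a checked exception...
    String get(String key) throws StorageException;
}

class InMemoryStore implements Store {
    // ...but an in-memory override that cannot fail may narrow it away entirely.
    @Override
    public String get(String key) {
        return "value-for-" + key;
    }
}
```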
@@ -397,7 +397,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
        assertThat(checkpointer.largestPermittedCheckpointValue(), equalTo(sequenceNumber));
    }

-    /*
+    /**
     * This test is a mixed test of checking some basic functionality of checkpointing at a sequence number and making
     * sure certain bounds checks and validations are being performed inside the checkpointer to prevent clients from
     * checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be checkpointing
@@ -444,7 +444,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
                new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string
                ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max
                ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value
-                ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value
+                ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value
        };
        for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) {
            try {
@@ -477,7 +477,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
                processingCheckpointer.lastCheckpointValue(), equalTo(ExtendedSequenceNumber.SHARD_END));
    }

-    /*
+    /**
     * This test is a mixed test of checking some basic functionality of two phase checkpointing at a sequence number
     * and making sure certain bounds checks and validations are being performed inside the checkpointer to prevent
     * clients from checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be
@@ -548,7 +548,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
                new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string
                ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max
                ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value
-                ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value
+                ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value
        };
        for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) {
            try {
@@ -566,7 +566,6 @@ public class ShardShardRecordProcessorCheckpointerTest {
        assertThat("Largest sequence number should not have changed",
                processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber));
        assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue());
-
        }

        // advance to third number
@@ -601,7 +600,6 @@ public class ShardShardRecordProcessorCheckpointerTest {
     *
     * @throws Exception
     */
-    @SuppressWarnings("serial")
    @Test
    public final void testMixedCheckpointCalls() throws Exception {
        for (LinkedHashMap<String, CheckpointAction> testPlan : getMixedCallsTestPlan()) {
@@ -617,7 +615,6 @@ public class ShardShardRecordProcessorCheckpointerTest {
     *
     * @throws Exception
     */
-    @SuppressWarnings("serial")
    @Test
    public final void testMixedTwoPhaseCheckpointCalls() throws Exception {
        for (LinkedHashMap<String, CheckpointAction> testPlan : getMixedCallsTestPlan()) {
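Two of the hunks above add a trailing comma after the last array-initializer element. Java explicitly allows this, and it keeps future additions (and their diffs) to a single line. The snippet below only illustrates the language rule and is not project code:

```java
class TrailingCommaSketch {
    // Legal Java: a comma after the last element is permitted in array initializers.
    private static final String[] SENTINELS = {
            "TRIM_HORIZON",
            "LATEST",
            "SHARD_END",
    };

    static int count() {
        return SENTINELS.length; // still 3 elements; the trailing comma adds nothing
    }
}
```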
@@ -30,7 +30,6 @@ import java.io.IOException;
 import java.net.Inet4Address;
 import java.net.URISyntaxException;
 import java.net.UnknownHostException;
-import java.util.Optional;

 /**
 * Default configuration for a producer or consumer used in integration tests.
@@ -82,7 +81,6 @@ public abstract class KCLAppConfig {
    }

    public final KinesisAsyncClient buildAsyncKinesisClient() throws URISyntaxException, IOException {
-
        if (kinesisAsyncClient == null) {
            // Setup H2 client config.
            final NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder()
@@ -105,7 +105,7 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest {
    @Test
    public void testElectedLeadersAsPerExpectedShufflingOrder()
            throws Exception {
-        List<Lease> leases = getLeases(5, false /*emptyLeaseOwner */,false /* duplicateLeaseOwner */, true /* activeLeases */);
+        List<Lease> leases = getLeases(5, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */);
        when(leaseRefresher.listLeases()).thenReturn(leases);
        Set<String> expectedLeaders = getExpectedLeaders(leases);
        for (String leader : expectedLeaders) {
@@ -86,7 +86,7 @@ public class DiagnosticEventsTest {
        assertEquals(event.getLargestPoolSize(), largestPoolSize);
        assertEquals(event.getMaximumPoolSize(), maximumPoolSize);
        assertEquals(event.getLeasesOwned(), leaseAssignments.size());
-        assertEquals(event.getCurrentQueueSize(),0);
+        assertEquals(0, event.getCurrentQueueSize());

        verify(defaultHandler, times(1)).visit(event);
    }
@@ -110,7 +110,7 @@ public class DiagnosticEventsTest {
        assertEquals(event.getExecutorStateEvent().getLargestPoolSize(), largestPoolSize);
        assertEquals(event.getExecutorStateEvent().getMaximumPoolSize(), maximumPoolSize);
        assertEquals(event.getExecutorStateEvent().getLeasesOwned(), leaseAssignments.size());
-        assertEquals(event.getExecutorStateEvent().getCurrentQueueSize(),0);
+        assertEquals(0, event.getExecutorStateEvent().getCurrentQueueSize());
        assertTrue(event.getThrowable() instanceof TestRejectedTaskException);

        verify(defaultHandler, times(1)).visit(event);
@@ -136,7 +136,7 @@ public class DiagnosticEventsTest {
        assertEquals(executorStateEvent.getLargestPoolSize(), largestPoolSize);
        assertEquals(executorStateEvent.getMaximumPoolSize(), maximumPoolSize);
        assertEquals(executorStateEvent.getLeasesOwned(), leaseAssignments.size());
-        assertEquals(executorStateEvent.getCurrentQueueSize(),0);
+        assertEquals(0, executorStateEvent.getCurrentQueueSize());

        RejectedTaskEvent rejectedTaskEvent = factory.rejectedTaskEvent(executorStateEvent,
                new TestRejectedTaskException());
@@ -145,7 +145,7 @@ public class DiagnosticEventsTest {
        assertEquals(rejectedTaskEvent.getExecutorStateEvent().getLargestPoolSize(), largestPoolSize);
        assertEquals(rejectedTaskEvent.getExecutorStateEvent().getMaximumPoolSize(), maximumPoolSize);
        assertEquals(rejectedTaskEvent.getExecutorStateEvent().getLeasesOwned(), leaseAssignments.size());
-        assertEquals(rejectedTaskEvent.getExecutorStateEvent().getCurrentQueueSize(),0);
+        assertEquals(0, rejectedTaskEvent.getExecutorStateEvent().getCurrentQueueSize());
        assertTrue(rejectedTaskEvent.getThrowable() instanceof TestRejectedTaskException);
    }

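These hunks flip `assertEquals` so the literal `0` comes first. JUnit's `assertEquals(expected, actual)` treats the first argument as the expected value, so the swap does not change whether the test passes; it only makes a failure message read correctly ("expected 0 but was N" instead of the reverse). A small sketch:

```java
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertOrderSketchTest {
    private int currentQueueSize() {
        return 0; // stand-in for the value under test
    }

    @Test
    public void expectedValueGoesFirst() {
        // A swapped order still passes when the values match, but on failure it would
        // report the arguments backwards; expected-first keeps the message honest.
        assertEquals(0, currentQueueSize());
    }
}
```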
@@ -15,12 +15,10 @@
 package software.amazon.kinesis.coordinator;

 import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertThat;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -210,7 +210,7 @@ public class PeriodicShardSyncManagerTest {
        }}.stream().map(hashKeyRangeForLease -> {
            MultiStreamLease lease = new MultiStreamLease();
            lease.hashKeyRange(hashKeyRangeForLease);
-            if(lease.hashKeyRangeForLease().startingHashKey().toString().equals("4")) {
+            if (lease.hashKeyRangeForLease().startingHashKey().toString().equals("4")) {
                lease.checkpoint(ExtendedSequenceNumber.SHARD_END);
            } else {
                lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
@@ -342,7 +342,7 @@ public class PeriodicShardSyncManagerTest {
            lease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-"+(++leaseCounter[0])));
            lease.shardId("shard-"+(leaseCounter[0]));
            // Setting the hashrange only for last two leases
-            if(leaseCounter[0] >= 3) {
+            if (leaseCounter[0] >= 3) {
                lease.hashKeyRange(hashKeyRangeForLease);
            }
            lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
@@ -355,7 +355,7 @@ public class PeriodicShardSyncManagerTest {
        Assert.assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync());

        // Assert that all the leases now has hashRanges set.
-        for(Lease lease : multiStreamLeases) {
+        for (Lease lease : multiStreamLeases) {
            Assert.assertNotNull(lease.hashKeyRangeForLease());
        }
    }
@@ -390,7 +390,7 @@ public class PeriodicShardSyncManagerTest {
            lease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-"+(++leaseCounter[0])));
            lease.shardId("shard-"+(leaseCounter[0]));
            // Setting the hashrange only for last two leases
-            if(leaseCounter[0] >= 3) {
+            if (leaseCounter[0] >= 3) {
                lease.hashKeyRange(hashKeyRangeForLease);
            }
            lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
@@ -403,14 +403,14 @@ public class PeriodicShardSyncManagerTest {
        Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync());

        // Assert that all the leases now has hashRanges set.
-        for(Lease lease : multiStreamLeases) {
+        for (Lease lease : multiStreamLeases) {
            Assert.assertNotNull(lease.hashKeyRangeForLease());
        }
    }

    @Test
    public void testFor1000DifferentValidSplitHierarchyTreeTheHashRangesAreAlwaysComplete() {
-        for(int i=0; i < 1000; i++) {
+        for (int i=0; i < 1000; i++) {
            int maxInitialLeaseCount = 100;
            List<Lease> leases = generateInitialLeases(maxInitialLeaseCount);
            reshard(leases, 5, ReshardType.SPLIT, maxInitialLeaseCount, false);
@@ -514,7 +514,7 @@ public class PeriodicShardSyncManagerTest {
        for (int i = 0; i < leasesToMerge; i += 2) {
            Lease parent1 = leasesEligibleForMerge.get(i);
            Lease parent2 = leasesEligibleForMerge.get(i + 1);
-            if(parent2.hashKeyRangeForLease().startingHashKey().subtract(parent1.hashKeyRangeForLease().endingHashKey()).equals(BigInteger.ONE))
+            if (parent2.hashKeyRangeForLease().startingHashKey().subtract(parent1.hashKeyRangeForLease().endingHashKey()).equals(BigInteger.ONE))
            {
                parent1.checkpoint(ExtendedSequenceNumber.SHARD_END);
                if (!shouldKeepSomeParentsInProgress || (shouldKeepSomeParentsInProgress && isOneFromDiceRoll())) {
@@ -35,7 +35,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.mockito.internal.verification.VerificationModeFactory.atMost;
-import static software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.*;

 import java.time.Duration;
 import java.util.ArrayList;
@@ -104,6 +103,9 @@ import software.amazon.kinesis.metrics.MetricsFactory;
 import software.amazon.kinesis.metrics.MetricsConfig;
 import software.amazon.kinesis.processor.Checkpointer;
 import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy;
+import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.AutoDetectionAndDeferredDeletionStrategy;
+import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy;
+import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.ProvidedStreamsDeferredDeletionStrategy;
 import software.amazon.kinesis.processor.MultiStreamTracker;
 import software.amazon.kinesis.processor.ProcessorConfig;
 import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
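The SchedulerTest import hunks replace a static wildcard import of `FormerStreamsLeasesDeletionStrategy.*` with explicit imports of the nested strategy classes. Wildcard imports hide where names come from and tend to trip checks such as Checkstyle's AvoidStarImport or UnusedImports (which checks drove this particular change is an assumption here); importing nested types by name keeps call sites short without the wildcard. A small sketch with hypothetical types:

```java
// Hypothetical outer type standing in for FormerStreamsLeasesDeletionStrategy.
class DeletionStrategy {
    static class NoOp extends DeletionStrategy {
    }

    static class Deferred extends DeletionStrategy {
    }
}

// In another file, instead of `import some.pkg.DeletionStrategy.*;`, one would write:
//     import some.pkg.DeletionStrategy.NoOp;
//     import some.pkg.DeletionStrategy.Deferred;
// and then refer to the nested classes directly.
class StrategyUserSketch {
    DeletionStrategy pick(boolean deferred) {
        return deferred ? new DeletionStrategy.Deferred() : new DeletionStrategy.NoOp();
    }
}
```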
@@ -727,8 +729,8 @@ public class SchedulerTest {
            boolean expectPendingStreamsForDeletion,
            boolean onlyStreamsNoLeasesDeletion)
            throws DependencyException, ProvisionedThroughputException, InvalidStateException {
-        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1,5);
-        List<StreamConfig> streamConfigList2 = createDummyStreamConfigList(3,7);
+        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1, 5);
+        List<StreamConfig> streamConfigList2 = createDummyStreamConfigList(3, 7);
        retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName)
                .retrievalFactory(retrievalFactory);
        when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2);
@@ -742,7 +744,7 @@ public class SchedulerTest {
                        Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)))
                .collect(Collectors.toCollection(HashSet::new));

-        if(onlyStreamsNoLeasesDeletion) {
+        if (onlyStreamsNoLeasesDeletion) {
            expectedSyncedStreams = IntStream.concat(IntStream.range(1, 3), IntStream.range(5, 7))
                    .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(
                            Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)))
@@ -756,7 +758,7 @@ public class SchedulerTest {

        Assert.assertEquals(expectedSyncedStreams, syncedStreams);
        List<StreamConfig> expectedCurrentStreamConfigs;
-        if(onlyStreamsNoLeasesDeletion) {
+        if (onlyStreamsNoLeasesDeletion) {
            expectedCurrentStreamConfigs = IntStream.range(3, 7).mapToObj(streamId -> new StreamConfig(
                    StreamIdentifier.multiStreamInstance(
                            Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)),
@@ -778,8 +780,8 @@ public class SchedulerTest {

    @Test
    public void testKinesisStaleDeletedStreamCleanup() throws ProvisionedThroughputException, InvalidStateException, DependencyException {
-        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1,6);
-        List<StreamConfig> streamConfigList2 = createDummyStreamConfigList(1,4);
+        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1, 6);
+        List<StreamConfig> streamConfigList2 = createDummyStreamConfigList(1, 4);

        prepareForStaleDeletedStreamCleanupTests(streamConfigList1, streamConfigList2);

@@ -820,7 +822,7 @@ public class SchedulerTest {
    @Test
    public void testKinesisStaleDeletedStreamNoCleanUpForTrackedStream()
            throws ProvisionedThroughputException, InvalidStateException, DependencyException {
-        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1,6);
+        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1, 6);
        prepareForStaleDeletedStreamCleanupTests(streamConfigList1);

        scheduler.deletedStreamListProvider().add(createDummyStreamConfig(3).streamIdentifier());
@@ -1243,7 +1245,7 @@ public class SchedulerTest {
        @Override
        public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory,
                StreamConfig streamConfig, DeletedStreamListProvider deletedStreamListProvider) {
-            if(shouldReturnDefaultShardSyncTaskmanager) {
+            if (shouldReturnDefaultShardSyncTaskmanager) {
                return shardSyncTaskManager;
            }
            final ShardSyncTaskManager shardSyncTaskManager = mock(ShardSyncTaskManager.class);
@@ -1255,7 +1257,7 @@ public class SchedulerTest {
            when(shardSyncTaskManager.hierarchicalShardSyncer()).thenReturn(hierarchicalShardSyncer);
            when(shardDetector.streamIdentifier()).thenReturn(streamConfig.streamIdentifier());
            when(shardSyncTaskManager.callShardSyncTask()).thenReturn(new TaskResult(null));
-            if(shardSyncFirstAttemptFailure) {
+            if (shardSyncFirstAttemptFailure) {
                when(shardDetector.listShards())
                        .thenThrow(new RuntimeException("Service Exception"))
                        .thenReturn(Collections.EMPTY_LIST);
@@ -14,8 +14,6 @@
 */
 package software.amazon.kinesis.coordinator;

-import java.util.concurrent.ThreadPoolExecutor;
-
 /**
 * Unit tests of Worker.
 */
@@ -118,8 +116,7 @@ public class WorkerTest {

    private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 = SAMPLE_RECORD_PROCESSOR_FACTORY;

-
-    *//**
+    *//*
     * Test method for {@link Worker#getApplicationName()}.
     *//*
    @Test
@@ -346,7 +343,7 @@ public class WorkerTest {
        Assert.assertTrue(count > 0);
    }

-    *//**
+    *//*
     * Runs worker with threadPoolSize == numShards
     * Test method for {@link Worker#run()}.
     *//*
@@ -357,7 +354,7 @@ public class WorkerTest {
        runAndTestWorker(numShards, threadPoolSize);
    }

-    *//**
+    *//*
     * Runs worker with threadPoolSize < numShards
     * Test method for {@link Worker#run()}.
     *//*
@@ -368,7 +365,7 @@ public class WorkerTest {
        runAndTestWorker(numShards, threadPoolSize);
    }

-    *//**
+    *//*
     * Runs worker with threadPoolSize > numShards
     * Test method for {@link Worker#run()}.
     *//*
@@ -379,7 +376,7 @@ public class WorkerTest {
        runAndTestWorker(numShards, threadPoolSize);
    }

-    *//**
+    *//*
     * Runs worker with threadPoolSize < numShards
     * Test method for {@link Worker#run()}.
     *//*
@@ -395,7 +392,7 @@ public class WorkerTest {
        runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard, config);
    }

-    *//**
+    *//*
     * Runs worker with threadPoolSize < numShards
     * Test method for {@link Worker#run()}.
     *//*
@@ -557,7 +554,7 @@ public class WorkerTest {
        verify(v2RecordProcessor, times(1)).shutdown(any(ShutdownInput.class));
    }

-    *//**
+    *//*
     * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of
     * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads.
     * This behavior makes the test a bit racy, since we need to ensure a specific order of events.
@@ -1734,7 +1731,8 @@ public class WorkerTest {
            return new ReflectionFieldMatcher<>(itemClass, fieldName, fieldMatcher);
        }
    }
-    *//**
+
+    *//*
     * Returns executor service that will be owned by the worker. This is useful to test the scenario
     * where worker shuts down the executor service also during shutdown flow.
     *
@@ -1756,9 +1754,6 @@ public class WorkerTest {
        return shards;
    }

-    *//**
-     * @return
-     *//*
    private List<Shard> createShardListWithOneSplit() {
        List<Shard> shards = new ArrayList<Shard>();
        SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("39428", "987324");
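The odd-looking `*//**` to `*//*` edits above all sit inside a large block of commented-out legacy tests in WorkerTest. `*//*` is simply the end of one block comment immediately followed by the start of the next, which is how adjacent comment blocks are chained so individual pieces can be re-enabled later; switching the opener from `/**` to `/*` keeps those inner headers from being treated as Javadoc. A tiny illustration (hypothetical method names):

```java
class CommentedOutBlockSketch {
    /*
    void disabledTestA() {
        // ... original body kept for reference ...
    }
    *//*
     * Header for the next disabled test; written as a plain block comment
     * (slash-star) rather than Javadoc (slash-star-star) so tools do not try
     * to attach it to a declaration.
     *//*
    void disabledTestB() {
    }
    */

    void stillCompiles() {
        // The chained comments above are ordinary block comments; nothing here runs them.
    }
}
```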
@@ -1592,7 +1592,7 @@ public class HierarchicalShardSyncerTest {
        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
    }

-    /**
+    /*
     * <pre>
     * Shard structure (x-axis is epochs):
     * 0 3 6 9
@@ -1869,7 +1869,7 @@ public class HierarchicalShardSyncerTest {
        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
    }

-    /**
+    /*
     * <pre>
     * Shard structure (x-axis is epochs):
     * 0 3 6 9
@@ -2325,12 +2325,16 @@ public class HierarchicalShardSyncerTest {
    @Test
    public void testEmptyLeaseTablePopulatesLeasesWithCompleteHashRangeAfterTwoRetries() throws Exception {
        final List<Shard> shardsWithIncompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "69")),
-                ShardObjectHelper.newShard("shardId-1", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("71", ShardObjectHelper.MAX_HASH_KEY))
+                ShardObjectHelper.newShard("shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "69")),
+                ShardObjectHelper.newShard("shardId-1", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("71", ShardObjectHelper.MAX_HASH_KEY))
        );
        final List<Shard> shardsWithCompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
-                ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))
+                ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
+                ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))
        );

        when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true);
@@ -2352,8 +2356,10 @@ public class HierarchicalShardSyncerTest {
    @Test
    public void testEmptyLeaseTablePopulatesLeasesWithCompleteHashRange() throws Exception {
        final List<Shard> shardsWithCompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
-                ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))
+                ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
+                ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))
        );

        when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true);
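The `/**` to `/*` edits in HierarchicalShardSyncerTest (and the opposite `/*` to `/**` edits in the checkpointer test earlier) only switch between a plain block comment and a Javadoc comment. The compiler treats both identically; the distinction matters to the Javadoc tool and to Javadoc-style checks, which only look at `/** ... */` comments. For illustration:

```java
class CommentKindsSketch {
    /*
     * Plain block comment: ignored by the Javadoc tool and by Javadoc-style checks,
     * useful for ASCII diagrams that should not be parsed as documentation.
     */
    void diagramAnnotated() {
    }

    /**
     * Javadoc comment: picked up by the Javadoc tool and attached to the method below.
     */
    void documented() {
    }
}
```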
@@ -14,10 +14,11 @@
 */
 package software.amazon.kinesis.leases;

-import java.awt.*;
+import java.awt.Button;
+import java.awt.Dimension;
+import java.awt.GridLayout;
 import java.awt.event.ActionEvent;
 import java.awt.event.ActionListener;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -25,7 +26,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

-import javax.swing.*;
+import javax.swing.BoxLayout;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JPanel;

 import lombok.extern.slf4j.Slf4j;
 import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
@@ -54,9 +58,8 @@ public class LeaseCoordinatorExerciser {
    private static final long INITIAL_LEASE_TABLE_READ_CAPACITY = 10L;
    private static final long INITIAL_LEASE_TABLE_WRITE_CAPACITY = 50L;

-    public static void main(String[] args) throws InterruptedException, DependencyException, InvalidStateException,
-            ProvisionedThroughputException, IOException {
-
+    public static void main(String[] args) throws DependencyException, InvalidStateException,
+            ProvisionedThroughputException {
        int numCoordinators = 9;
        int numLeases = 73;
        int leaseDurationMillis = 10000;
@@ -15,16 +15,8 @@
 package software.amazon.kinesis.leases;

 import lombok.extern.slf4j.Slf4j;
-import org.junit.Rule;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-import org.mockito.Mock;
-import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
-import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
 import software.amazon.awssdk.services.dynamodb.model.BillingMode;
 import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher;
-import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseSerializer;
-import software.amazon.kinesis.leases.dynamodb.TableCreatorCallback;

 @Slf4j
 public class LeaseIntegrationBillingModePayPerRequestTest extends LeaseIntegrationTest {
@@ -26,11 +26,9 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;

-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

-import software.amazon.kinesis.leases.ShardInfo;
 import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

 public class ShardInfoTest {
@@ -56,7 +56,6 @@ public class ShardObjectHelper {
    private ShardObjectHelper() {
    }

-
    /** Helper method to create a new shard object.
     * @param shardId
     * @param parentShardId
@@ -84,7 +83,9 @@ public class ShardObjectHelper {
            String adjacentParentShardId,
            SequenceNumberRange sequenceNumberRange,
            HashKeyRange hashKeyRange) {
-        return Shard.builder().shardId(shardId).parentShardId(parentShardId).adjacentParentShardId(adjacentParentShardId).sequenceNumberRange(sequenceNumberRange).hashKeyRange(hashKeyRange).build();
+        return Shard.builder().shardId(shardId).parentShardId(parentShardId)
+                .adjacentParentShardId(adjacentParentShardId).sequenceNumberRange(sequenceNumberRange)
+                .hashKeyRange(hashKeyRange).build();
    }

    /** Helper method.
@@ -116,5 +117,4 @@ public class ShardObjectHelper {
        return parentShardIds;
    }

-
}
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;

-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -1,6 +1,5 @@
 package software.amazon.kinesis.leases.dynamodb;

-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -12,7 +11,7 @@ import software.amazon.kinesis.metrics.MetricsFactory;

 import java.util.UUID;

-import static org.mockito.Mockito.times;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

@@ -51,17 +50,34 @@ public class DynamoDBLeaseCoordinatorTest {

        leaseCoordinator.initialize();

-        verify(leaseRefresher, times(1)).createLeaseTableIfNotExists();
-        verify(leaseRefresher, times(1)).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS);
+        verify(leaseRefresher).createLeaseTableIfNotExists();
+        verify(leaseRefresher).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS);
    }

-    @Test
+    @Test(expected = DependencyException.class)
    public void testInitialize_tableCreationFails() throws Exception {
        when(leaseRefresher.createLeaseTableIfNotExists()).thenReturn(false);
        when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)).thenReturn(false);

-        Assert.assertThrows(DependencyException.class, () -> leaseCoordinator.initialize());
-        verify(leaseRefresher, times(1)).createLeaseTableIfNotExists();
-        verify(leaseRefresher, times(1)).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS);
+        try {
+            leaseCoordinator.initialize();
+        } finally {
+            verify(leaseRefresher).createLeaseTableIfNotExists();
+            verify(leaseRefresher).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS);
+        }
        }
+    }
+
+    /**
+     * Validates a {@link NullPointerException} is not thrown when the lease taker
+     * is stopped before it starts/exists.
+     *
+     * @see <a href="https://github.com/awslabs/amazon-kinesis-client/issues/745">issue #745</a>
+     * @see <a href="https://github.com/awslabs/amazon-kinesis-client/issues/900">issue #900</a>
+     */
+    @Test
+    public void testStopLeaseTakerBeforeStart() {
+        leaseCoordinator.stopLeaseTaker();
+        assertTrue(leaseCoordinator.getAssignments().isEmpty());
+    }
+
 }
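The DynamoDBLeaseCoordinatorTest hunk swaps `Assert.assertThrows` for JUnit 4's `@Test(expected = ...)` attribute, with the mock verifications moved into a `finally` block so they run whether or not `initialize()` throws. Both APIs exist in JUnit 4.13; which one a project prefers is a style choice. A minimal sketch of the pattern, using a hypothetical exception and subject:

```java
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class ExpectedExceptionPatternTest {
    static class SetupFailedException extends RuntimeException {
    }

    static class Subject {
        boolean attempted;

        void initialize() {
            attempted = true;
            throw new SetupFailedException(); // simulated failure
        }
    }

    @Test(expected = SetupFailedException.class)
    public void initializeFailurePropagates() {
        Subject subject = new Subject();
        try {
            subject.initialize();
        } finally {
            // Assertions in finally still run even though the test exits via the exception.
            assertTrue(subject.attempted);
        }
    }
}
```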
@@ -26,7 +26,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;

-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -158,7 +157,6 @@ public class DynamoDBLeaseRefresherTest {

        verify(mockScanFuture, times(2)).get(anyLong(), any(TimeUnit.class));
        verify(dynamoDbClient, times(2)).scan(any(ScanRequest.class));
-
    }

    @Test
@@ -37,7 +37,7 @@ import static org.junit.Assert.assertThat;
@RunWith(MockitoJUnitRunner.class)
public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends
LeaseIntegrationBillingModePayPerRequestTest {
-private final String TEST_METRIC = "TestOperation";
+private static final String TEST_METRIC = "TestOperation";

// This test case's leases last 2 seconds
private static final long LEASE_DURATION_MILLIS = 2000L;

@@ -36,7 +36,7 @@ import software.amazon.kinesis.metrics.NullMetricsFactory;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
@RunWith(MockitoJUnitRunner.class)
public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest {
-private final String TEST_METRIC = "TestOperation";
+private static final String TEST_METRIC = "TestOperation";

// This test case's leases last 2 seconds
private static final long LEASE_DURATION_MILLIS = 2000L;

@@ -86,7 +86,7 @@ public class DynamoDBLeaseRenewerTest {
*/
Lease lease1 = newLease("1");
Lease lease2 = newLease("2");
-leasesToRenew = Arrays.asList(lease1,lease2);
+leasesToRenew = Arrays.asList(lease1, lease2);
renewer.addLeasesToRenew(leasesToRenew);

doReturn(true).when(leaseRefresher).renewLease(lease1);

@@ -15,9 +15,7 @@
package software.amazon.kinesis.leases.dynamodb;

import java.util.Collection;
-import java.util.List;
import java.util.Map;
-import java.util.stream.Collectors;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

@@ -153,7 +151,6 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest {
assertThat(addedLeases.values().containsAll(allLeases), equalTo(true));
}

-
/**
* Sets the leaseDurationMillis to 0, ensuring a get request to update the existing lease after computing
* leases to take

@@ -22,7 +22,6 @@ import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;
-import java.util.Optional;

import org.junit.Before;
import org.junit.Test;

@@ -43,7 +42,7 @@ public class BlockOnParentShardTaskTest {
private final String shardId = "shardId-97";
private final String streamId = "123:stream:146";
private final String concurrencyToken = "testToken";
-private final List<String> emptyParentShardIds = new ArrayList<String>();
+private final List<String> emptyParentShardIds = new ArrayList<>();
private ShardInfo shardInfo;

@Before

@@ -77,7 +76,6 @@ public class BlockOnParentShardTaskTest {
@Test
public final void testCallShouldNotThrowBlockedOnParentWhenParentsHaveFinished()
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
-
ShardInfo shardInfo = null;
BlockOnParentShardTask task = null;
String parent1ShardId = "shardId-1";

@@ -118,7 +116,6 @@ public class BlockOnParentShardTaskTest {
@Test
public final void testCallShouldNotThrowBlockedOnParentWhenParentsHaveFinishedMultiStream()
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
-
ShardInfo shardInfo = null;
BlockOnParentShardTask task = null;
String parent1LeaseKey = streamId + ":" + "shardId-1";

@@ -162,7 +159,6 @@ public class BlockOnParentShardTaskTest {
@Test
public final void testCallWhenParentsHaveNotFinished()
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
-
ShardInfo shardInfo = null;
BlockOnParentShardTask task = null;
String parent1ShardId = "shardId-1";

@@ -35,7 +35,6 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
-import java.util.Optional;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@@ -173,7 +172,6 @@ public class ShardConsumerSubscriberTest {
assertThat(subscriber.getAndResetDispatchFailure(), nullValue());

verify(shardConsumer, times(20)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class));
-
}

@Test

@@ -293,12 +291,10 @@ public class ShardConsumerSubscriberTest {
assertThat(received.size(), equalTo(recordsPublisher.responses.size()));
Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i),
eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput())));
-
}

@Test
public void restartAfterRequestTimerExpiresWhenNotGettingRecordsAfterInitialization() throws Exception {
-
executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
.setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build());

@@ -347,12 +343,10 @@ public class ShardConsumerSubscriberTest {
assertThat(received.size(), equalTo(recordsPublisher.responses.size()));
Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i),
eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput())));
-
}

@Test
public void restartAfterRequestTimerExpiresWhenInitialTaskExecutionIsRejected() throws Exception {
-
executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
.setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build());

@@ -405,7 +399,6 @@ public class ShardConsumerSubscriberTest {
assertThat(received.size(), equalTo(recordsPublisher.responses.size()));
Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i),
eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput())));
-
}

private Object directlyExecuteRunnable(InvocationOnMock invocation) {

@@ -623,8 +616,6 @@ public class ShardConsumerSubscriberTest {

/**
* Test to validate the warning message from ShardConsumer is not suppressed with the default configuration of 0
-*
-* @throws Exception
*/
@Test
public void noLoggingSuppressionNeededOnHappyPathTest() {

@@ -648,8 +639,6 @@ public class ShardConsumerSubscriberTest {

/**
* Test to validate the warning message from ShardConsumer is not suppressed with the default configuration of 0
-*
-* @throws Exception
*/
@Test
public void loggingNotSuppressedAfterTimeoutTest() {

@@ -677,8 +666,6 @@ public class ShardConsumerSubscriberTest {
/**
* Test to validate the warning message from ShardConsumer is successfully supressed if we only have intermittant
* readTimeouts.
-*
-* @throws Exception
*/
@Test
public void loggingSuppressedAfterIntermittentTimeoutTest() {

@@ -705,8 +692,6 @@ public class ShardConsumerSubscriberTest {
/**
* Test to validate the warning message from ShardConsumer is successfully logged if multiple sequential timeouts
* occur.
-*
-* @throws Exception
*/
@Test
public void loggingPartiallySuppressedAfterMultipleTimeoutTest() {

@@ -733,8 +718,6 @@ public class ShardConsumerSubscriberTest {

/**
* Test to validate the warning message from ShardConsumer is successfully logged if sequential timeouts occur.
-*
-* @throws Exception
*/
@Test
public void loggingPartiallySuppressedAfterConsecutiveTimeoutTest() {

@@ -763,8 +746,6 @@ public class ShardConsumerSubscriberTest {
/**
* Test to validate the non-timeout warning message from ShardConsumer is not suppressed with the default
* configuration of 0
-*
-* @throws Exception
*/
@Test
public void loggingNotSuppressedOnNonReadTimeoutExceptionNotIgnoringReadTimeoutsExceptionTest() {

@@ -792,12 +773,9 @@ public class ShardConsumerSubscriberTest {
/**
* Test to validate the non-timeout warning message from ShardConsumer is not suppressed with 2 ReadTimeouts to
* ignore
-*
-* @throws Exception
*/
@Test
public void loggingNotSuppressedOnNonReadTimeoutExceptionIgnoringReadTimeoutsTest() {

// We're not throwing a ReadTimeout, so no suppression is expected.
// The test expects a non-ReadTimeout exception to be thrown on requests 3 and 5, and we expect logs on
// each Non-ReadTimeout Exception, no matter what the number of ReadTimeoutsToIgnore we pass in,

@@ -17,7 +17,6 @@ package software.amazon.kinesis.metrics;
import org.junit.Test;

import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
-import software.amazon.kinesis.metrics.EndingMetricsScope;

public class EndingMetricsScopeTest {

@@ -47,8 +47,8 @@ public class MetricAccumulatingQueueTest {
*/
@Test
public void testAccumulation() {
-Collection<Dimension> dimensionsA = Collections.singleton(dim("name","a"));
-Collection<Dimension> dimensionsB = Collections.singleton(dim("name","b"));
+Collection<Dimension> dimensionsA = Collections.singleton(dim("name", "a"));
+Collection<Dimension> dimensionsB = Collections.singleton(dim("name", "b"));
String keyA = "a";
String keyB = "b";

@@ -27,31 +27,28 @@ import static org.junit.Assert.assertThat;
@Slf4j
public class AWSExceptionManagerTest {

+private static final String EXPECTED_HANDLING_MARKER = AWSExceptionManagerTest.class.getSimpleName();
+
+private final AWSExceptionManager manager = new AWSExceptionManager();

@Test
public void testSpecificException() {
-AWSExceptionManager manager = new AWSExceptionManager();
-final String EXPECTED_HANDLING_MARKER = "Handled-TestException";

manager.add(TestException.class, t -> {
log.info("Handling test exception: {} -> {}", t.getMessage(), t.getAdditionalMessage());
return new RuntimeException(EXPECTED_HANDLING_MARKER, t);
});

-TestException te = new TestException("Main Mesage", "Sub Message");
+TestException te = new TestException("Main Message", "Sub Message");

RuntimeException converted = manager.apply(te);

assertThat(converted, isA(RuntimeException.class));
assertThat(converted.getMessage(), equalTo(EXPECTED_HANDLING_MARKER));
assertThat(converted.getCause(), equalTo(te));
}

@Test
public void testParentException() {
-AWSExceptionManager manager = new AWSExceptionManager();
-final String EXPECTED_HANDLING_MARKER = "Handled-IllegalStateException";
manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i));
manager.add(Exception.class, i -> new RuntimeException("RawException", i));
manager.add(IllegalStateException.class, i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i));

@@ -66,8 +63,7 @@ public class AWSExceptionManagerTest {

@Test
public void testDefaultHandler() {
-final String EXPECTED_HANDLING_MARKER = "Handled-Default";
-AWSExceptionManager manager = new AWSExceptionManager().defaultFunction(i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i));
+manager.defaultFunction(i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i));

manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i));
manager.add(Exception.class, i -> new RuntimeException("RawException", i));

@@ -83,8 +79,6 @@ public class AWSExceptionManagerTest {

@Test
public void testIdHandler() {
-AWSExceptionManager manager = new AWSExceptionManager();
-
manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i));
manager.add(Exception.class, i -> new RuntimeException("RawException", i));
manager.add(IllegalStateException.class, i -> i);

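The refactor above relies on just three calls on AWSExceptionManager: add(...), defaultFunction(...), and apply(...). A hedged sketch of one more test that could sit in the same class, using only those calls and the static imports the class already has; the method name, messages, and the fallback behaviour for an unmapped exception type are inferred from the tests shown above, not taken from the KCL source:

```java
@Test
public void sketchOfDefaultHandlerFallback() {
    // Illustrative only, not part of this change.
    manager.add(IllegalArgumentException.class, t -> new RuntimeException("IllegalArgument", t));
    manager.defaultFunction(t -> new RuntimeException(EXPECTED_HANDLING_MARKER, t));

    final IllegalStateException unmapped = new IllegalStateException("no mapping registered");
    final RuntimeException converted = manager.apply(unmapped);

    // With no mapping registered for IllegalStateException, the default function is applied,
    // wrapping the original exception the same way the handlers above do.
    assertThat(converted.getMessage(), equalTo(EXPECTED_HANDLING_MARKER));
    assertThat(converted.getCause(), equalTo(unmapped));
}
```
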
@@ -24,7 +24,6 @@ import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.slf4j.Logger;
-import software.amazon.kinesis.retrieval.ThrottlingReporter;

@RunWith(MockitoJUnitRunner.class)
public class ThrottlingReporterTest {

@@ -40,7 +39,6 @@ public class ThrottlingReporterTest {
reporter.throttled();
verify(throttleLog).warn(anyString());
verify(throttleLog, never()).error(anyString());
-
}

@Test

@@ -63,7 +61,6 @@ public class ThrottlingReporterTest {
reporter.throttled();
verify(throttleLog, times(2)).warn(anyString());
verify(throttleLog, times(3)).error(anyString());
-
}

private class LogTestingThrottingReporter extends ThrottlingReporter {

@@ -19,7 +19,6 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

@@ -28,7 +27,6 @@ import static org.mockito.Mockito.when;
import java.util.concurrent.CompletableFuture;

import org.apache.commons.lang3.StringUtils;
-import org.hamcrest.Matchers;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

@@ -8,7 +8,6 @@ import io.reactivex.rxjava3.schedulers.Schedulers;
import io.reactivex.rxjava3.subscribers.SafeSubscriber;
import lombok.Data;
import lombok.RequiredArgsConstructor;
-import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.hamcrest.Description;
import org.hamcrest.Matcher;

@@ -54,7 +53,6 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;

@@ -77,7 +75,6 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.never;

@@ -172,11 +169,10 @@ public class FanOutRecordsPublisherTest {
assertThat(clientRecordsList.get(i), matchers.get(i));
}
});
-
}

@Test
-public void InvalidEventTest() throws Exception {
+public void testInvalidEvent() {
FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor

@@ -239,7 +235,6 @@ public class FanOutRecordsPublisherTest {
assertThat(clientRecordsList.get(i), matchers.get(i));
}
});
-
}

@Test

@@ -317,11 +312,10 @@ public class FanOutRecordsPublisherTest {
});

assertThat(source.getCurrentSequenceNumber(), equalTo("3000"));
-
}

@Test
-public void testIfEventsAreNotDeliveredToShardConsumerWhenPreviousEventDeliveryTaskGetsRejected() throws Exception {
+public void testIfEventsAreNotDeliveredToShardConsumerWhenPreviousEventDeliveryTaskGetsRejected() {
FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor

@@ -395,7 +389,6 @@ public class FanOutRecordsPublisherTest {
});

assertThat(source.getCurrentSequenceNumber(), equalTo("1000"));
-
}

@Test

@@ -443,10 +436,11 @@ public class FanOutRecordsPublisherTest {

@Override public void onNext(RecordsRetrieved input) {
receivedInput.add(input.processRecordsInput());
-assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber());
+assertEquals("" + ++lastSeenSeqNum,
+((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
subscription.request(1);
servicePublisher.request(1);
-if(receivedInput.size() == totalServicePublisherEvents) {
+if (receivedInput.size() == totalServicePublisherEvents) {
servicePublisherTaskCompletionLatch.countDown();
}
}

@@ -488,12 +482,10 @@ public class FanOutRecordsPublisherTest {
});

assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + ""));
-
}

@Test
public void testIfStreamOfEventsAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception {
-
CountDownLatch onS2SCallLatch = new CountDownLatch(2);

doAnswer(new Answer() {

@@ -549,10 +541,11 @@ public class FanOutRecordsPublisherTest {

@Override public void onNext(RecordsRetrieved input) {
receivedInput.add(input.processRecordsInput());
-assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber());
+assertEquals("" + ++lastSeenSeqNum,
+((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
subscription.request(1);
servicePublisher.request(1);
-if(receivedInput.size() == triggerCompleteAtNthEvent) {
+if (receivedInput.size() == triggerCompleteAtNthEvent) {
servicePublisherTaskCompletionLatch.countDown();
}
}

@@ -599,7 +592,6 @@ public class FanOutRecordsPublisherTest {
// Let's wait for sometime to allow the publisher to re-subscribe
onS2SCallLatch.await(5000, TimeUnit.MILLISECONDS);
verify(kinesisClient, times(2)).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
-
}

@Test

@@ -681,7 +673,7 @@ public class FanOutRecordsPublisherTest {
receivedInput.add(input.processRecordsInput());
subscription.request(1);
servicePublisher.request(1);
-if(receivedInput.size() == triggerCompleteAtNthEvent) {
+if (receivedInput.size() == triggerCompleteAtNthEvent) {
servicePublisherTaskCompletionLatch.countDown();
}
}

@@ -728,7 +720,6 @@ public class FanOutRecordsPublisherTest {
// With shard end event, onComplete must be propagated to the subscriber.
onCompleteLatch.await(5000, TimeUnit.MILLISECONDS);
assertTrue("OnComplete should be triggered", isOnCompleteTriggered[0]);
-
}

@Test

@@ -783,10 +774,11 @@ public class FanOutRecordsPublisherTest {

@Override public void onNext(RecordsRetrieved input) {
receivedInput.add(input.processRecordsInput());
-assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber());
+assertEquals("" + ++lastSeenSeqNum,
+((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
subscription.request(1);
servicePublisher.request(1);
-if(receivedInput.size() == triggerErrorAtNthEvent) {
+if (receivedInput.size() == triggerErrorAtNthEvent) {
servicePublisherTaskCompletionLatch.countDown();
}
}

@@ -831,7 +823,6 @@ public class FanOutRecordsPublisherTest {
assertThat(source.getCurrentSequenceNumber(), equalTo(triggerErrorAtNthEvent + ""));
onErrorReceiveLatch.await(5000, TimeUnit.MILLISECONDS);
assertTrue("OnError should have been thrown", isOnErrorThrown[0]);
-
}

@Test

@@ -879,10 +870,11 @@ public class FanOutRecordsPublisherTest {

@Override public void onNext(RecordsRetrieved input) {
receivedInput.add(input.processRecordsInput());
-assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber());
+assertEquals("" + ++lastSeenSeqNum,
+((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
subscription.request(1);
servicePublisher.request(1);
-if(receivedInput.size() == totalServicePublisherEvents) {
+if (receivedInput.size() == totalServicePublisherEvents) {
servicePublisherTaskCompletionLatch.countDown();
}
}

@@ -924,7 +916,6 @@ public class FanOutRecordsPublisherTest {
});

assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + ""));
-
}

@Test

@@ -973,7 +964,8 @@ public class FanOutRecordsPublisherTest {

@Override public void onNext(RecordsRetrieved input) {
receivedInput.add(input.processRecordsInput());
-assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber());
+assertEquals("" + ++lastSeenSeqNum,
+((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
subscription.request(1);
servicePublisher.request(1);
}

@@ -1126,7 +1118,6 @@ public class FanOutRecordsPublisherTest {
assertThat(clientRecordsList.get(i), matchers.get(i));
}
});
-
}

@Test

@@ -1242,7 +1233,6 @@ public class FanOutRecordsPublisherTest {

verifyRecords(nonFailingSubscriber.received.get(0).records(), matchers);
verifyRecords(nonFailingSubscriber.received.get(1).records(), nextMatchers);
-
}

@Test

@@ -1328,7 +1318,7 @@ public class FanOutRecordsPublisherTest {
fanOutRecordsPublisher
.evictAckedEventAndScheduleNextEvent(() -> recordsRetrieved.batchUniqueIdentifier());
// Send stale event periodically
-if(totalRecordsRetrieved[0] % 10 == 0) {
+if (totalRecordsRetrieved[0] % 10 == 0) {
fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(
() -> new BatchUniqueIdentifier("some_uuid_str", "some_old_flow"));
}

@@ -1368,7 +1358,7 @@ public class FanOutRecordsPublisherTest {
int count = 0;
// Now that we allowed upto 10 elements queued up, send a pair of good and stale ack to verify records
// delivered as expected.
-while(count++ < 10 && (batchUniqueIdentifierQueued = ackQueue.take()) != null) {
+while (count++ < 10 && (batchUniqueIdentifierQueued = ackQueue.take()) != null) {
final BatchUniqueIdentifier batchUniqueIdentifierFinal = batchUniqueIdentifierQueued;
fanOutRecordsPublisher
.evictAckedEventAndScheduleNextEvent(() -> batchUniqueIdentifierFinal);

@@ -1403,7 +1393,7 @@ public class FanOutRecordsPublisherTest {
int count = 0;
// Now that we allowed upto 10 elements queued up, send a pair of good and stale ack to verify records
// delivered as expected.
-while(count++ < 2 && (batchUniqueIdentifierQueued = ackQueue.poll(1000, TimeUnit.MILLISECONDS)) != null) {
+while (count++ < 2 && (batchUniqueIdentifierQueued = ackQueue.poll(1000, TimeUnit.MILLISECONDS)) != null) {
final BatchUniqueIdentifier batchUniqueIdentifierFinal = batchUniqueIdentifierQueued;
fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(
() -> new BatchUniqueIdentifier("some_uuid_str", batchUniqueIdentifierFinal.getFlowIdentifier()));

@@ -1457,11 +1447,11 @@ public class FanOutRecordsPublisherTest {

flowCaptor.getValue().exceptionOccurred(exception);

-Optional<OnErrorEvent> onErrorEvent = subscriber.events.stream().filter(e -> e instanceof OnErrorEvent).map(e -> (OnErrorEvent)e).findFirst();
+Optional<OnErrorEvent> onErrorEvent = subscriber.events.stream().filter(e -> e instanceof OnErrorEvent)
+.map(e -> (OnErrorEvent) e).findFirst();

assertThat(onErrorEvent, equalTo(Optional.of(new OnErrorEvent(exception))));
assertThat(acquireTimeoutLogged.get(), equalTo(true));
-
}

private void verifyRecords(List<KinesisClientRecord> clientRecordsList, List<KinesisClientRecordMatcher> matchers) {

@@ -1587,8 +1577,8 @@ public class FanOutRecordsPublisherTest {
public void run() {
for (int i = 1; i <= numOfTimes; ) {
demandNotifier.acquireUninterruptibly();
-if(i == sendCompletionAt) {
-if(shardEndAction != null) {
+if (i == sendCompletionAt) {
+if (shardEndAction != null) {
shardEndAction.accept(i++);
} else {
action.accept(i++);

@@ -1596,7 +1586,7 @@ public class FanOutRecordsPublisherTest {
completeAction.run();
break;
}
-if(i == sendErrorAt) {
+if (i == sendErrorAt) {
action.accept(i++);
errorAction.run();
break;

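The anonymous subscribers in the hunks above all follow the same backpressure discipline: request one event, process it, then request the next. A self-contained sketch of that pattern against the plain Reactive Streams interfaces; the class name and log output are illustrative and not taken from the KCL tests:

```java
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

// Minimal one-at-a-time backpressure subscriber.
public class OneAtATimeSubscriber<T> implements Subscriber<T> {
    private Subscription subscription;

    @Override
    public void onSubscribe(Subscription s) {
        this.subscription = s;
        s.request(1);                 // prime the stream with a single unit of demand
    }

    @Override
    public void onNext(T item) {
        System.out.println("received: " + item);
        subscription.request(1);      // ask for the next item only after handling this one
    }

    @Override
    public void onError(Throwable t) {
        System.err.println("stream failed: " + t);
    }

    @Override
    public void onComplete() {
        System.out.println("stream completed");
    }
}
```
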
@@ -331,7 +331,7 @@ public class KinesisDataFetcherTest {

private CompletableFuture<GetRecordsResponse> makeGetRecordsResponse(String nextIterator, List<Record> records) {
List<ChildShard> childShards = new ArrayList<>();
-if(nextIterator == null) {
+if (nextIterator == null) {
childShards = createChildShards();
}
return CompletableFuture.completedFuture(GetRecordsResponse.builder().nextShardIterator(nextIterator)

@@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

@@ -277,7 +276,8 @@ public class PrefetchRecordsPublisherIntegrationTest {

@Override
public DataFetcherResult getRecords() {
-GetRecordsResponse getRecordsResult = GetRecordsResponse.builder().records(new ArrayList<>(records)).nextShardIterator(nextShardIterator).millisBehindLatest(1000L).build();
+GetRecordsResponse getRecordsResult = GetRecordsResponse.builder().records(new ArrayList<>(records))
+.nextShardIterator(nextShardIterator).millisBehindLatest(1000L).build();

return new AdvancingResult(getRecordsResult);
}

@@ -327,7 +327,7 @@ public class PrefetchRecordsPublisherTest {
// TODO: fix this verification
// verify(getRecordsRetrievalStrategy, times(callRate)).getRecords(MAX_RECORDS_PER_CALL);
// assertEquals(spyQueue.size(), callRate);
-assertTrue("Call Rate is "+callRate,callRate < MAX_SIZE);
+assertTrue("Call Rate is " + callRate, callRate < MAX_SIZE);
}

@Test

@@ -422,8 +422,10 @@ public class PrefetchRecordsPublisherTest {

@Test
public void testRetryableRetrievalExceptionContinues() {
-GetRecordsResponse response = GetRecordsResponse.builder().millisBehindLatest(100L).records(Collections.emptyList()).nextShardIterator(NEXT_SHARD_ITERATOR).build();
-when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenThrow(new RetryableRetrievalException("Timeout", new TimeoutException("Timeout"))).thenReturn(response);
+GetRecordsResponse response = GetRecordsResponse.builder().millisBehindLatest(100L)
+.records(Collections.emptyList()).nextShardIterator(NEXT_SHARD_ITERATOR).build();
+when(getRecordsRetrievalStrategy.getRecords(anyInt()))
+.thenThrow(new RetryableRetrievalException("Timeout", new TimeoutException("Timeout"))).thenReturn(response);

getRecordsCache.start(sequenceNumber, initialPosition);

@@ -638,7 +640,7 @@ public class PrefetchRecordsPublisherTest {

verify(getRecordsRetrievalStrategy, atLeast(2)).getRecords(anyInt());

-while(getRecordsCache.getPublisherSession().prefetchRecordsQueue().remainingCapacity() > 0) {
+while (getRecordsCache.getPublisherSession().prefetchRecordsQueue().remainingCapacity() > 0) {
Thread.yield();
}

@@ -697,7 +699,7 @@ public class PrefetchRecordsPublisherTest {

public void resetIteratorTo(String nextIterator) {
Iterator<GetRecordsResponse> newIterator = responses.iterator();
-while(newIterator.hasNext()) {
+while (newIterator.hasNext()) {
GetRecordsResponse current = newIterator.next();
if (StringUtils.equals(nextIterator, current.nextShardIterator())) {
if (!newIterator.hasNext()) {

@@ -725,7 +727,7 @@ public class PrefetchRecordsPublisherTest {

private static final int LOSS_EVERY_NTH_RECORD = 50;
private static int recordCounter = 0;
-private static final ScheduledExecutorService consumerHealthChecker = Executors.newScheduledThreadPool(1);
+private static final ScheduledExecutorService CONSUMER_HEALTH_CHECKER = Executors.newScheduledThreadPool(1);

public LossyNotificationSubscriber(Subscriber<RecordsRetrieved> delegate, RecordsPublisher recordsPublisher) {
super(delegate, recordsPublisher);

@@ -738,7 +740,7 @@ public class PrefetchRecordsPublisherTest {
getDelegateSubscriber().onNext(recordsRetrieved);
} else {
log.info("Record Loss Triggered");
-consumerHealthChecker.schedule(() -> {
+CONSUMER_HEALTH_CHECKER.schedule(() -> {
getRecordsPublisher().restartFrom(recordsRetrieved);
Flowable.fromPublisher(getRecordsPublisher()).subscribeOn(Schedulers.computation())
.observeOn(Schedulers.computation(), true, 8).subscribe(this);

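The stubbing in testRetryableRetrievalExceptionContinues above chains thenThrow(...).thenReturn(...), the standard Mockito way to model "fail once, then succeed". A minimal stand-alone sketch of that idiom, using a hypothetical Callable mock rather than the KCL retrieval strategy:

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.Callable;

public class ThenThrowThenReturnSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        Callable<String> flaky = mock(Callable.class);

        // First call throws, every subsequent call returns the stubbed value,
        // which is how the test simulates a retryable failure followed by success.
        when(flaky.call()).thenThrow(new RuntimeException("transient")).thenReturn("ok");

        try {
            flaky.call();                  // first attempt fails
        } catch (RuntimeException expected) {
            System.out.println("retrying after: " + expected.getMessage());
        }
        System.out.println(flaky.call());  // prints "ok"
    }
}
```
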
@@ -2,14 +2,8 @@ package software.amazon.kinesis.utils;

import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
-import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest;
-import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse;
-import software.amazon.kinesis.common.FutureUtils;

-import java.time.Duration;
-import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

@Slf4j

@@ -28,7 +22,7 @@ public abstract class AWSResourceManager {

/**
* Get a list of all the names of resources of a specified type
-* @return
+*
* @throws Exception
*/
public abstract List<String> getAllResourceNames() throws Exception;

@@ -21,7 +21,7 @@ public class BlockingUtils {

public static <Records> Records blockUntilRecordsAvailable(Supplier<Records> recordsSupplier, long timeoutMillis) {
Records recordsRetrieved;
-while((recordsRetrieved = recordsSupplier.get()) == null && timeoutMillis > 0 ) {
+while ((recordsRetrieved = recordsSupplier.get()) == null && timeoutMillis > 0 ) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {

@@ -29,7 +29,7 @@ public class BlockingUtils {
}
timeoutMillis -= 100;
}
-if(recordsRetrieved != null) {
+if (recordsRetrieved != null) {
return recordsRetrieved;
} else {
throw new RuntimeException("No records found");

@@ -37,7 +37,7 @@ public class BlockingUtils {
}

public static boolean blockUntilConditionSatisfied(Supplier<Boolean> conditionSupplier, long timeoutMillis) {
-while(!conditionSupplier.get() && timeoutMillis > 0 ) {
+while (!conditionSupplier.get() && timeoutMillis > 0 ) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {

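For context, the two helpers in this hunk poll a Supplier every 100 ms until a value (or true) appears or the timeout runs out. A hedged usage sketch: the import assumes BlockingUtils sits in the software.amazon.kinesis.utils package alongside the other test utilities in this diff, and the queue and producer thread are purely illustrative.

```java
import software.amazon.kinesis.utils.BlockingUtils;   // assumed package, see note above

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class BlockingUtilsSketch {
    public static void main(String[] args) {
        Queue<String> arrivals = new ConcurrentLinkedQueue<>();
        new Thread(() -> {
            try {
                Thread.sleep(500);                     // simulate a slow producer
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            arrivals.add("record-1");
        }).start();

        // Polls the supplier until it returns non-null or the timeout elapses;
        // per the hunk above, it throws RuntimeException("No records found") on timeout.
        String record = BlockingUtils.blockUntilRecordsAvailable(arrivals::poll, 5000L);
        System.out.println("Got: " + record);

        // Same polling idea, but for a boolean condition.
        boolean drained = BlockingUtils.blockUntilConditionSatisfied(arrivals::isEmpty, 5000L);
        System.out.println("Queue drained: " + drained);
    }
}
```
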
@@ -12,8 +12,6 @@ import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException;
import software.amazon.awssdk.services.dynamodb.model.TableStatus;
import software.amazon.kinesis.common.FutureUtils;

-import java.io.IOException;
-import java.net.URISyntaxException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;

@@ -12,7 +12,7 @@ public class SubscribeToShardRequestMatcher extends ArgumentMatcher<SubscribeToS
}

public boolean matches(Object rightObject) {
-SubscribeToShardRequest right = (SubscribeToShardRequest)rightObject;
+SubscribeToShardRequest right = (SubscribeToShardRequest) rightObject;
return left.shardId().equals(right.shardId()) &&
left.consumerARN().equals(right.consumerARN()) &&
left.startingPosition().equals(right.startingPosition());

checkstyle/checkstyle-suppressions.xml (new file)
@@ -0,0 +1,8 @@
+<!DOCTYPE suppressions PUBLIC
+    "-//Puppy Crawl//DTD Suppressions 1.1//EN"
+    "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
+
+<suppressions>
+    <!-- Disable all checks for protobuf-generated files. -->
+    <suppress files=".*/kpl/Messages.java" checks="[a-zA-Z0-9]*"/>
+</suppressions>

checkstyle/checkstyle.xml (new file)
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<!DOCTYPE module PUBLIC
+    "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+    "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<module name="Checker">
+    <module name="FileTabCharacter">
+        <property name="eachLine" value="true"/>
+    </module>
+
+    <module name="LineLength">
+        <property name="fileExtensions" value="java"/>
+        <property name="max" value="170"/>
+        <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://"/>
+    </module>
+
+    <module name="SuppressWithPlainTextCommentFilter">
+        <property name="offCommentFormat" value="CHECKSTYLE.OFF\: ([\w\|]+)"/>
+        <property name="onCommentFormat" value="CHECKSTYLE.ON\: ([\w\|]+)"/>
+        <!-- $1 refers to the first match group in the regex defined in commentFormat -->
+        <property name="checkFormat" value="$1"/>
+    </module>
+
+    <module name="TreeWalker">
+        <module name="AvoidStarImport"/>
+        <module name="ArrayTrailingComma"/>
+        <module name="ConstantName"/>
+        <module name="CovariantEquals"/>
+        <module name="EmptyStatement"/>
+        <module name="EqualsHashCode"/>
+        <module name="InvalidJavadocPosition"/>
+        <module name="LocalFinalVariableName"/>
+        <module name="LocalVariableName"/>
+        <module name="MemberName"/>
+        <module name="MethodName">
+            <!-- Method names must start with a lowercase letter. -->
+            <property name="format" value="^[a-z]\w*$"/>
+        </module>
+        <module name="NeedBraces"/>
+        <module name="OneStatementPerLine"/>
+        <module name="OneTopLevelClass"/>
+        <module name="OuterTypeFilename"/>
+        <module name="ParameterName"/>
+        <module name="RedundantImport"/>
+        <module name="UnusedImports"/>
+        <module name="UpperEll"/>
+        <module name="WhitespaceAfter"/>
+    </module>
+
+</module>

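Given the SuppressWithPlainTextCommentFilter configured above, individual checks can be switched off around a span of code with paired comments. A small illustrative Java fragment (the class and field name are hypothetical, chosen only to trip the MemberName check):

```java
public class LegacyHolder {
    // CHECKSTYLE.OFF: MemberName
    // The filter matches this comment and suppresses the named check
    // until the matching CHECKSTYLE.ON comment below.
    private int Legacy_Field_Name;
    // CHECKSTYLE.ON: MemberName
}
```
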
pom.xml
@@ -22,7 +22,7 @@
<artifactId>amazon-kinesis-client-pom</artifactId>
<packaging>pom</packaging>
<name>Amazon Kinesis Client Library</name>
-<version>2.5.1-SNAPSHOT</version>
+<version>2.5.1</version>
<description>The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data
from Amazon Kinesis.
</description>

@@ -72,6 +72,28 @@
</distributionManagement>

<build>
+    <plugins>
+        <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-checkstyle-plugin</artifactId>
+            <version>3.3.0</version>
+            <configuration>
+                <configLocation>checkstyle/checkstyle.xml</configLocation>
+                <consoleOutput>true</consoleOutput>
+                <failOnViolation>true</failOnViolation>
+                <includeTestSourceDirectory>true</includeTestSourceDirectory>
+                <suppressionsLocation>checkstyle/checkstyle-suppressions.xml</suppressionsLocation>
+            </configuration>
+            <executions>
+                <execution>
+                    <phase>validate</phase>
+                    <goals>
+                        <goal>check</goal>
+                    </goals>
+                </execution>
+            </executions>
+        </plugin>
+    </plugins>
<pluginManagement>
<plugins>
<plugin>