diff --git a/.gitignore b/.gitignore
index ffa4e664..863e68d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
target/
AwsCredentials.properties
+.idea
diff --git a/pom.xml b/pom.xml
index 1a2a017d..a9c22878 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,112 +1,180 @@
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>com.amazonaws</groupId>
-    <artifactId>amazon-kinesis-client</artifactId>
-    <packaging>jar</packaging>
-    <name>Amazon Kinesis Client Library for Java</name>
-    <version>1.6.4</version>
-    <description>The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data from Amazon Kinesis.</description>
-    <url>https://aws.amazon.com/kinesis</url>
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>com.amazonaws</groupId>
+    <artifactId>amazon-kinesis-client</artifactId>
+    <packaging>jar</packaging>
+    <name>Amazon Kinesis Client Library for Java</name>
+    <version>1.6.4</version>
+    <description>The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data
+        from Amazon Kinesis.
+    </description>
+    <url>https://aws.amazon.com/kinesis</url>
-    <scm>
-        <url>https://github.com/awslabs/amazon-kinesis-client.git</url>
-    </scm>
+    <scm>
+        <url>https://github.com/awslabs/amazon-kinesis-client.git</url>
+    </scm>
-    <licenses>
-        <license>
-            <name>Amazon Software License</name>
-            <url>https://aws.amazon.com/asl</url>
-            <distribution>repo</distribution>
-        </license>
-    </licenses>
+    <licenses>
+        <license>
+            <name>Amazon Software License</name>
+            <url>https://aws.amazon.com/asl</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
-    <properties>
-        <aws-java-sdk.version>1.11.14</aws-java-sdk.version>
-    </properties>
+    <properties>
+        <aws-java-sdk.version>1.11.14</aws-java-sdk.version>
+    </properties>
-    <dependencies>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-core</artifactId>
-            <version>${aws-java-sdk.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-dynamodb</artifactId>
-            <version>${aws-java-sdk.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-kinesis</artifactId>
-            <version>${aws-java-sdk.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-cloudwatch</artifactId>
-            <version>${aws-java-sdk.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-            <version>18.0</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.protobuf</groupId>
-            <artifactId>protobuf-java</artifactId>
-            <version>2.6.1</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <version>2.6</version>
-        </dependency>
-    </dependencies>
+    <dependencies>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-core</artifactId>
+            <version>${aws-java-sdk.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-dynamodb</artifactId>
+            <version>${aws-java-sdk.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-kinesis</artifactId>
+            <version>${aws-java-sdk.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>aws-java-sdk-cloudwatch</artifactId>
+            <version>${aws-java-sdk.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>18.0</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+            <version>2.6.1</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-lang</groupId>
+            <artifactId>commons-lang</artifactId>
+            <version>2.6</version>
+        </dependency>
-    <developers>
-        <developer>
-            <id>amazonwebservices</id>
-            <organization>Amazon Web Services</organization>
-            <organizationUrl>https://aws.amazon.com</organizationUrl>
-            <roles>
-                <role>developer</role>
-            </roles>
-        </developer>
-    </developers>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.11</version>
+            <scope>test</scope>
+        </dependency>
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.2</version>
-                <configuration>
-                    <source>1.7</source>
-                    <target>1.7</target>
-                    <encoding>UTF-8</encoding>
-                </configuration>
-            </plugin>
-            <plugin>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+            <version>1.10.19</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-all</artifactId>
+            <version>1.3</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.amazonaws</groupId>
+            <artifactId>DynamoDBLocal</artifactId>
+            <version>1.10.5.1</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <repositories>
+        <repository>
+            <id>dynamodb-local</id>
+            <name>DynamoDB Local Release Repository</name>
+            <url>http://dynamodb-local.s3-website-us-west-2.amazonaws.com/release</url>
+        </repository>
+    </repositories>
+
+    <developers>
+        <developer>
+            <id>amazonwebservices</id>
+            <organization>Amazon Web Services</organization>
+            <organizationUrl>https://aws.amazon.com</organizationUrl>
+            <roles>
+                <role>developer</role>
+            </roles>
+        </developer>
+    </developers>
+
+    <build>
+        <plugins>
+            <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-gpg-plugin</artifactId>
-                <version>1.5</version>
-                <executions>
-                    <execution>
-                        <id>sign-artifacts</id>
-                        <phase>verify</phase>
-                        <goals>
-                            <goal>sign</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.2</version>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                    <encoding>UTF-8</encoding>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-gpg-plugin</artifactId>
+                <version>1.5</version>
+                <executions>
+                    <execution>
+                        <id>sign-artifacts</id>
+                        <phase>verify</phase>
+                        <goals>
+                            <goal>sign</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>2.19.1</version>
+                <configuration>
+                    <excludes>
+                        <exclude>**/*IntegrationTest.java</exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-failsafe-plugin</artifactId>
+                <version>2.19.1</version>
+                <configuration>
+                    <includes>
+                        <include>**/*IntegrationTest.java</include>
+                    </includes>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>integration-test</goal>
+                            <goal>verify</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfiguratorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfiguratorTest.java
new file mode 100644
index 00000000..1ccd4941
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfiguratorTest.java
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.config;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.Set;
+
+import org.apache.commons.lang.StringUtils;
+import org.junit.Test;
+
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream;
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
+import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
+import com.google.common.collect.ImmutableSet;
+
+public class KinesisClientLibConfiguratorTest {
+
+ private String credentialName1 =
+ "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProvider";
+ private String credentialName2 =
+ "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysFailCredentialsProvider";
+ private KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();
+
+ @Test
+ public void testWithBasicSetup() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123"
+ }, '\n'));
+ assertEquals(config.getApplicationName(), "b");
+ assertEquals(config.getStreamName(), "a");
+ assertEquals(config.getWorkerIdentifier(), "123");
+ }
+
+ @Test
+ public void testWithLongVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "applicationName = app",
+ "streamName = 123",
+ "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
+ "workerId = 123",
+ "failoverTimeMillis = 100",
+ "shardSyncIntervalMillis = 500"
+ }, '\n'));
+
+ assertEquals(config.getApplicationName(), "app");
+ assertEquals(config.getStreamName(), "123");
+ assertEquals(config.getWorkerIdentifier(), "123");
+ assertEquals(config.getFailoverTimeMillis(), 100);
+ assertEquals(config.getShardSyncIntervalMillis(), 500);
+ }
+
+ @Test
+ public void testWithUnsupportedClientConfigurationVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
+ "workerId = id",
+ "kinesisClientConfig = {}",
+ "streamName = stream",
+ "applicationName = b"
+ }, '\n'));
+
+ assertEquals(config.getApplicationName(), "b");
+ assertEquals(config.getStreamName(), "stream");
+ assertEquals(config.getWorkerIdentifier(), "id");
+ // Setting the unsupported kinesisClientConfig property has no effect on the kinesisClientConfiguration variable.
+ }
+
+ @Test
+ public void testWithIntVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = kinesis",
+ "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1,
+ "workerId = w123",
+ "maxRecords = 10",
+ "metricsMaxQueueSize = 20",
+ "applicationName = kinesis"
+ }, '\n'));
+
+ assertEquals(config.getApplicationName(), "kinesis");
+ assertEquals(config.getStreamName(), "kinesis");
+ assertEquals(config.getWorkerIdentifier(), "w123");
+ assertEquals(config.getMaxRecords(), 10);
+ assertEquals(config.getMetricsMaxQueueSize(), 20);
+ }
+
+ @Test
+ public void testWithBooleanVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = ABCD, " + credentialName1,
+ "workerId = 0",
+ "cleanupLeasesUponShardCompletion = false",
+ "validateSequenceNumberBeforeCheckpointing = true"
+ }, '\n'));
+
+ assertEquals(config.getApplicationName(), "b");
+ assertEquals(config.getStreamName(), "a");
+ assertEquals(config.getWorkerIdentifier(), "0");
+ assertFalse(config.shouldCleanupLeasesUponShardCompletion());
+ assertTrue(config.shouldValidateSequenceNumberBeforeCheckpointing());
+ }
+
+ @Test
+ public void testWithStringVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = ABCD," + credentialName1,
+ "workerId = 1",
+ "kinesisEndpoint = https://kinesis",
+ "metricsLevel = SUMMARY"
+ }, '\n'));
+
+ assertEquals(config.getWorkerIdentifier(), "1");
+ assertEquals(config.getKinesisEndpoint(), "https://kinesis");
+ assertEquals(config.getMetricsLevel(), MetricsLevel.SUMMARY);
+ }
+
+ @Test
+ public void testWithSetVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = ABCD," + credentialName1,
+ "workerId = 1",
+ "metricsEnabledDimensions = ShardId, WorkerIdentifier"
+ }, '\n'));
+
+ Set<String> expectedMetricsEnabledDimensions = ImmutableSet.<String>builder().add(
+ "ShardId", "WorkerIdentifier").addAll(
+ KinesisClientLibConfiguration.METRICS_ALWAYS_ENABLED_DIMENSIONS).build();
+ assertEquals(config.getMetricsEnabledDimensions(), expectedMetricsEnabledDimensions);
+ }
+
+ @Test
+ public void testWithInitialPositionInStreamVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = ABCD," + credentialName1,
+ "workerId = 123",
+ "initialPositionInStream = TriM_Horizon"
+ }, '\n'));
+
+ assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON);
+ }
+
+ @Test
+ public void testSkippingNonKCLVariables() {
+ KinesisClientLibConfiguration config =
+ getConfiguration(StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = ABCD," + credentialName1,
+ "workerId = 123",
+ "initialPositionInStream = TriM_Horizon",
+ "abc = 1"
+ }, '\n'));
+
+ assertEquals(config.getApplicationName(), "b");
+ assertEquals(config.getStreamName(), "a");
+ assertEquals(config.getWorkerIdentifier(), "123");
+ assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON);
+ }
+
+ @Test
+ public void testWithInvalidIntValue() {
+ String test = StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123",
+ "failoverTimeMillis = 100nf"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ try {
+ configurator.getConfiguration(input);
+ } catch (Exception e) {
+ fail("Don't expect to fail on invalid variable value");
+ }
+ }
+
+ @Test
+ public void testWithNegativeIntValue() {
+ String test = StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123",
+ "failoverTimeMillis = -12"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
+ try {
+ configurator.getConfiguration(input);
+ } catch (Exception e) {
+ fail("Don't expect to fail on invalid variable value");
+ }
+ }
+
+ @Test
+ public void testWithMissingCredentialsProvider() {
+ String test = StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "workerId = 123",
+ "failoverTimeMillis = 100",
+ "shardSyncIntervalMillis = 500"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
+ try {
+ configurator.getConfiguration(input);
+ fail("expect failure with no credentials provider variables");
+ } catch (Exception e) {
+ // succeed
+ }
+ }
+
+ @Test
+ public void testWithMissingWorkerId() {
+ String test = StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "failoverTimeMillis = 100",
+ "shardSyncIntervalMillis = 500"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+ KinesisClientLibConfiguration config = configurator.getConfiguration(input);
+
+ // if workerId is not provided, configurator should assign one for it automatically
+ assertNotNull(config.getWorkerIdentifier());
+ assertFalse(config.getWorkerIdentifier().isEmpty());
+ }
+
+ @Test
+ public void testWithMissingStreamName() {
+ String test = StringUtils.join(new String[] {
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123",
+ "failoverTimeMillis = 100"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
+ try {
+ configurator.getConfiguration(input);
+ fail("expect failure with no stream name variables");
+ } catch (Exception e) {
+ // succeed
+ }
+ }
+
+ @Test
+ public void testWithMissingApplicationName() {
+ String test = StringUtils.join(new String[] {
+ "streamName = a",
+ "AWSCredentialsProvider = " + credentialName1,
+ "workerId = 123",
+ "failoverTimeMillis = 100"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
+ try {
+ configurator.getConfiguration(input);
+ fail("expect failure with no application variables");
+ } catch (Exception e) {
+ // succeed
+ }
+ }
+
+ @Test
+ public void testWithAWSCredentialsFailed() {
+ String test = StringUtils.join(new String[] {
+ "streamName = a",
+ "applicationName = b",
+ "AWSCredentialsProvider = " + credentialName2,
+ "failoverTimeMillis = 100",
+ "shardSyncIntervalMillis = 500"
+ }, '\n');
+ InputStream input = new ByteArrayInputStream(test.getBytes());
+
+ // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
+ try {
+ KinesisClientLibConfiguration config = configurator.getConfiguration(input);
+ config.getKinesisCredentialsProvider().getCredentials();
+ fail("expect failure with wrong credentials provider");
+ } catch (Exception e) {
+ // succeed
+ }
+ }
+
+ /**
+ * This credentials provider will always succeed
+ */
+ public static class AlwaysSucceedCredentialsProvider implements AWSCredentialsProvider {
+
+ @Override
+ public AWSCredentials getCredentials() {
+ return null;
+ }
+
+ @Override
+ public void refresh() {
+ }
+
+ }
+
+ /**
+ * This credentials provider will always fail
+ */
+ public static class AlwaysFailCredentialsProvider implements AWSCredentialsProvider {
+
+ @Override
+ public AWSCredentials getCredentials() {
+ throw new IllegalArgumentException();
+ }
+
+ @Override
+ public void refresh() {
+ }
+
+ }
+
+ private KinesisClientLibConfiguration getConfiguration(String configString) {
+ InputStream input = new ByteArrayInputStream(configString.getBytes());
+ KinesisClientLibConfiguration config = configurator.getConfiguration(input);
+ return config;
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/CheckpointImplTestBase.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/CheckpointImplTestBase.java
new file mode 100644
index 00000000..6e93a296
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/CheckpointImplTestBase.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
+import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
+
+/**
+ * Base class for unit testing checkpoint implementations.
+ * This class has tests common to InMemory and FileBased implementations.
+ */
+public abstract class CheckpointImplTestBase {
+
+ protected final String startingSequenceNumber = "0001000";
+ protected final String testConcurrencyToken = "testToken";
+ protected ICheckpoint checkpoint;
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ MetricsHelper.startScope(new NullMetricsFactory(), "CheckpointImplTestBase");
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ }
+
+ /**
+ * Constructor.
+ */
+ public CheckpointImplTestBase() {
+ super();
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ @Test
+ public final void testInitialSetCheckpoint() throws Exception {
+ String sequenceNumber = "1";
+ String shardId = "myShardId";
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber);
+ checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken);
+ ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId);
+ Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint);
+ }
+
+ @Test
+ public final void testAdvancingSetCheckpoint() throws Exception {
+ String shardId = "myShardId";
+ for (Integer i = 0; i < 10; i++) {
+ String sequenceNumber = i.toString();
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber);
+ checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken);
+ ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId);
+ Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint);
+ }
+ }
+
+ /**
+ * Test method to verify setCheckpoint and getCheckpoint methods.
+ *
+ * @throws Exception
+ */
+ @Test
+ public final void testSetAndGetCheckpoint() throws Exception {
+ String checkpointValue = "12345";
+ String shardId = "testShardId-1";
+ String concurrencyToken = "token-1";
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue);
+ checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken);
+ Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImpl.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImpl.java
new file mode 100644
index 00000000..ad761ef5
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImpl.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+
+/**
+ * Everything is stored in memory and there is no fault-tolerance.
+ */
+public class InMemoryCheckpointImpl implements ICheckpoint {
+
+ private static final Log LOG = LogFactory.getLog(InMemoryCheckpointImpl.class);
+
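+ // setCheckpoint(...) writes to both maps; getCheckpoint(...) reads flushpoints, while the
+ // package-private getLastCheckpoint(...) falls back to startingSequenceNumber for unknown shards.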
+ private Map<String, ExtendedSequenceNumber> checkpoints = new HashMap<>();
+ private Map<String, ExtendedSequenceNumber> flushpoints = new HashMap<>();
+ private final String startingSequenceNumber;
+
+ /**
+ * Constructor.
+ *
+ * @param startingSequenceNumber Initial checkpoint will be set to this sequenceNumber (for all shards).
+ */
+ public InMemoryCheckpointImpl(String startingSequenceNumber) {
+ super();
+ this.startingSequenceNumber = startingSequenceNumber;
+ }
+
+ ExtendedSequenceNumber getLastCheckpoint(String shardId) {
+ ExtendedSequenceNumber checkpoint = checkpoints.get(shardId);
+ if (checkpoint == null) {
+ checkpoint = new ExtendedSequenceNumber(startingSequenceNumber);
+ }
+ LOG.debug("getLastCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint);
+ return checkpoint;
+ }
+
+ ExtendedSequenceNumber getLastFlushpoint(String shardId) {
+ ExtendedSequenceNumber flushpoint = flushpoints.get(shardId);
+ LOG.debug("getLastFlushpoint shardId: " + shardId + " flushpoint: " + flushpoint);
+ return flushpoint;
+ }
+
+ void resetCheckpointToLastFlushpoint(String shardId) throws KinesisClientLibException {
+ ExtendedSequenceNumber currentFlushpoint = flushpoints.get(shardId);
+ if (currentFlushpoint == null) {
+ checkpoints.put(shardId, new ExtendedSequenceNumber(startingSequenceNumber));
+ } else {
+ checkpoints.put(shardId, currentFlushpoint);
+ }
+ }
+
+ ExtendedSequenceNumber getGreatestPrimaryFlushpoint(String shardId) throws KinesisClientLibException {
+ verifyNotEmpty(shardId, "shardId must not be null.");
+ ExtendedSequenceNumber greatestFlushpoint = getLastFlushpoint(shardId);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("getGreatestPrimaryFlushpoint value for shardId " + shardId + " = " + greatestFlushpoint);
+ }
+ return greatestFlushpoint;
+ };
+
+ ExtendedSequenceNumber getRestartPoint(String shardId) {
+ verifyNotEmpty(shardId, "shardId must not be null.");
+ ExtendedSequenceNumber restartPoint = getLastCheckpoint(shardId);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("getRestartPoint value for shardId " + shardId + " = " + restartPoint);
+ }
+ return restartPoint;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken)
+ throws KinesisClientLibException {
+ checkpoints.put(shardId, checkpointValue);
+ flushpoints.put(shardId, checkpointValue);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("shardId: " + shardId + " checkpoint: " + checkpointValue);
+ }
+
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException {
+ ExtendedSequenceNumber checkpoint = flushpoints.get(shardId);
+ LOG.debug("getCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint);
+ return checkpoint;
+ }
+
+ /** Check that string is neither null nor empty.
+ */
+ static void verifyNotEmpty(String string, String message) {
+ if ((string == null) || (string.isEmpty())) {
+ throw new IllegalArgumentException(message);
+ }
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImplTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImplTest.java
new file mode 100644
index 00000000..04408b36
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImplTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;
+
+import org.junit.Before;
+
+
+/**
+ * Test the InMemoryCheckpointImpl class.
+ */
+public class InMemoryCheckpointImplTest extends CheckpointImplTestBase {
+ /**
+ * Constructor.
+ */
+ public InMemoryCheckpointImplTest() {
+ super();
+ }
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ checkpoint = new InMemoryCheckpointImpl(startingSequenceNumber);
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTaskTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTaskTest.java
new file mode 100644
index 00000000..a9dcc429
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTaskTest.java
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
+
+/**
+ *
+ */
+public class BlockOnParentShardTaskTest {
+
+ private static final Log LOG = LogFactory.getLog(BlockOnParentShardTaskTest.class);
+ private final long backoffTimeInMillis = 50L;
+ private final String shardId = "shardId-97";
+ private final String concurrencyToken = "testToken";
+ private final List<String> emptyParentShardIds = new ArrayList<String>();
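+ // ShardInfo with no parent shards, used by tests that do not exercise parent-shard blocking.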
+ ShardInfo defaultShardInfo = new ShardInfo(shardId, concurrencyToken, emptyParentShardIds);
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ /**
+ * Test call() when there are no parent shards.
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ */
+ @Test
+ public final void testCallNoParents()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(shardId)).thenReturn(null);
+
+ BlockOnParentShardTask task = new BlockOnParentShardTask(defaultShardInfo, leaseManager, backoffTimeInMillis);
+ TaskResult result = task.call();
+ Assert.assertNull(result.getException());
+ }
+
+ /**
+ * Test call() when there are 1-2 parent shards that have been fully processed.
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ */
+ @Test
+ public final void testCallWhenParentsHaveFinished()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+
+ ShardInfo shardInfo = null;
+ BlockOnParentShardTask task = null;
+ String parent1ShardId = "shardId-1";
+ String parent2ShardId = "shardId-2";
+ List<String> parentShardIds = new ArrayList<>();
+ TaskResult result = null;
+
+ KinesisClientLease parent1Lease = new KinesisClientLease();
+ parent1Lease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
+ KinesisClientLease parent2Lease = new KinesisClientLease();
+ parent2Lease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
+
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(parent1ShardId)).thenReturn(parent1Lease);
+ when(leaseManager.getLease(parent2ShardId)).thenReturn(parent2Lease);
+
+ // test single parent
+ parentShardIds.add(parent1ShardId);
+ shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
+ task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
+ result = task.call();
+ Assert.assertNull(result.getException());
+
+ // test two parents
+ parentShardIds.add(parent2ShardId);
+ shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
+ task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
+ result = task.call();
+ Assert.assertNull(result.getException());
+ }
+
+ /**
+ * Test call() when there are 1-2 parent shards that have NOT been fully processed.
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ */
+ @Test
+ public final void testCallWhenParentsHaveNotFinished()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+
+ ShardInfo shardInfo = null;
+ BlockOnParentShardTask task = null;
+ String parent1ShardId = "shardId-1";
+ String parent2ShardId = "shardId-2";
+ List<String> parentShardIds = new ArrayList<>();
+ TaskResult result = null;
+
+ KinesisClientLease parent1Lease = new KinesisClientLease();
+ parent1Lease.setCheckpoint(ExtendedSequenceNumber.LATEST);
+ KinesisClientLease parent2Lease = new KinesisClientLease();
+ // mock a sequence number checkpoint
+ parent2Lease.setCheckpoint(new ExtendedSequenceNumber("98182584034"));
+
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(parent1ShardId)).thenReturn(parent1Lease);
+ when(leaseManager.getLease(parent2ShardId)).thenReturn(parent2Lease);
+
+ // test single parent
+ parentShardIds.add(parent1ShardId);
+ shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
+ task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
+ result = task.call();
+ Assert.assertNotNull(result.getException());
+
+ // test two parents
+ parentShardIds.add(parent2ShardId);
+ shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
+ task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
+ result = task.call();
+ Assert.assertNotNull(result.getException());
+ }
+
+ /**
+ * Test call() with 1 parent shard before and after it is completely processed.
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ */
+ @Test
+ public final void testCallBeforeAndAfterAParentFinishes()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+
+ BlockOnParentShardTask task = null;
+ String parentShardId = "shardId-1";
+ List<String> parentShardIds = new ArrayList<>();
+ parentShardIds.add(parentShardId);
+ ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
+ TaskResult result = null;
+ KinesisClientLease parentLease = new KinesisClientLease();
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(parentShardId)).thenReturn(parentLease);
+
+ // test when parent shard has not yet been fully processed
+ parentLease.setCheckpoint(new ExtendedSequenceNumber("98182584034"));
+ task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
+ result = task.call();
+ Assert.assertNotNull(result.getException());
+
+ // test when parent has been fully processed
+ parentLease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
+ task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
+ result = task.call();
+ Assert.assertNull(result.getException());
+ }
+
+ /**
+ * Test to verify we return the right task type.
+ */
+ @Test
+ public final void testGetTaskType() {
+ BlockOnParentShardTask task = new BlockOnParentShardTask(defaultShardInfo, null, backoffTimeInMillis);
+ Assert.assertEquals(TaskType.BLOCK_ON_PARENT_SHARDS, task.getTaskType());
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparatorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparatorTest.java
new file mode 100644
index 00000000..7abe7c52
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparatorTest.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import junit.framework.Assert;
+
+import org.junit.Test;
+
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
+
+public class CheckpointValueComparatorTest {
+ @Test
+ public final void testCheckpointValueComparator() {
+ CheckpointValueComparator comparator = new CheckpointValueComparator();
+ final String trimHorizon = SentinelCheckpoint.TRIM_HORIZON.toString();
+ final String latest = SentinelCheckpoint.LATEST.toString();
+ final String shardEnd = SentinelCheckpoint.SHARD_END.toString();
+ final String lesser = "17";
+ final String greater = "123";
+ final String notASentinelCheckpointValue = "just-some-string";
+
+ String[][] equalValues =
+ { { trimHorizon, trimHorizon }, { latest, latest }, { greater, greater }, { shardEnd, shardEnd } };
+
+ // Check equal values
+ for (String[] pair : equalValues) {
+ Assert.assertTrue("Expected: " + pair[0] + " and " + pair[1] + " to be equal",
+ comparator.compare(pair[0], pair[1]) == 0 && comparator.compare(pair[1], pair[0]) == 0);
+
+ }
+
+ // Check non-equal values
+ String[][] lessThanValues =
+ { { latest, lesser }, { trimHorizon, greater }, { lesser, greater },
+ { trimHorizon, shardEnd }, { latest, shardEnd }, { lesser, shardEnd }, { trimHorizon, latest } };
+ for (String[] pair : lessThanValues) {
+ Assert.assertTrue("Expected: " + pair[0] + " < " + pair[1],
+ comparator.compare(pair[0], pair[1]) < 0);
+ Assert.assertTrue("Expected: " + pair[1] + " > " + pair[0],
+ comparator.compare(pair[1], pair[0]) > 0);
+ }
+
+ // Check bad values
+ String[][] badValues =
+ { { null, null }, { latest, null }, { null, trimHorizon }, { null, shardEnd }, { null, lesser },
+ { null, notASentinelCheckpointValue }, { latest, notASentinelCheckpointValue },
+ { notASentinelCheckpointValue, trimHorizon }, { shardEnd, notASentinelCheckpointValue },
+ { notASentinelCheckpointValue, lesser } };
+ for (String[] pair : badValues) {
+ try {
+ comparator.compare(pair[0], pair[1]);
+ Assert.fail("Compare should have thrown an exception when one of its parameters is not a sequence "
+ + "number and not a sentinel checkpoint value but didn't when comparing " + pair[0] + " and "
+ + pair[1]);
+ } catch (Exception e1) {
+ try {
+ comparator.compare(pair[1], pair[0]);
+ Assert.fail("Compare should have thrown an exception when one of its parameters is not a sequence "
+ + "number and not a sentinel checkpoint value but didn't when comparing " + pair[1]
+ + " and " + pair[0]);
+ } catch (Exception e2) {
+ continue;
+ }
+ }
+ }
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ExceptionThrowingLeaseManager.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ExceptionThrowingLeaseManager.java
new file mode 100644
index 00000000..765aaa44
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ExceptionThrowingLeaseManager.java
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
+
+/**
+ * Mock lease manager that can be configured to throw leasing exceptions from specific methods.
+ *
+ */
+class ExceptionThrowingLeaseManager implements ILeaseManager<KinesisClientLease> {
+ private static final Log LOG = LogFactory.getLog(ExceptionThrowingLeaseManager.class);
+ private static final Throwable EXCEPTION_MSG = new Throwable("Test Exception");
+
+ // Use array below to control in what situations we want to throw exceptions.
+ private int[] leaseManagerMethodCallingCount;
+
+ /**
+ * Methods which we support (simulate exceptions).
+ */
+ enum ExceptionThrowingLeaseManagerMethods {
+ CREATELEASETABLEIFNOTEXISTS(0),
+ LEASETABLEEXISTS(1),
+ WAITUNTILLEASETABLEEXISTS(2),
+ LISTLEASES(3),
+ CREATELEASEIFNOTEXISTS(4),
+ GETLEASE(5),
+ RENEWLEASE(6),
+ TAKELEASE(7),
+ EVICTLEASE(8),
+ DELETELEASE(9),
+ DELETEALL(10),
+ UPDATELEASE(11),
+ NONE(Integer.MIN_VALUE);
+
+ private Integer index;
+
+ ExceptionThrowingLeaseManagerMethods(Integer index) {
+ this.index = index;
+ }
+
+ Integer getIndex() {
+ return this.index;
+ }
+ }
+
+ // Define which method should throw exception and when it should throw exception.
+ private ExceptionThrowingLeaseManagerMethods methodThrowingException = ExceptionThrowingLeaseManagerMethods.NONE;
+ private int timeThrowingException = Integer.MAX_VALUE;
+
+ // The real local lease manager which would do the real implementations.
+ private final ILeaseManager<KinesisClientLease> leaseManager;
+
+ /**
+ * Constructor accepts lease manager as only argument.
+ *
+ * @param leaseManager which will do the real implementations
+ */
+ ExceptionThrowingLeaseManager(ILeaseManager<KinesisClientLease> leaseManager) {
+ this.leaseManager = leaseManager;
+ this.leaseManagerMethodCallingCount = new int[ExceptionThrowingLeaseManagerMethods.values().length];
+ }
+
+ /**
+ * Set parameters used for throwing exception.
+ *
+ * @param method which would throw exception
+ * @param throwingTime defines what time to throw exception
+ */
+ void setLeaseLeaseManagerThrowingExceptionScenario(ExceptionThrowingLeaseManagerMethods method, int throwingTime) {
+ this.methodThrowingException = method;
+ this.timeThrowingException = throwingTime;
+ }
+
+ /**
+ * Reset all parameters used for throwing exception.
+ */
+ void clearLeaseManagerThrowingExceptionScenario() {
+ Arrays.fill(leaseManagerMethodCallingCount, 0);
+ this.methodThrowingException = ExceptionThrowingLeaseManagerMethods.NONE;
+ this.timeThrowingException = Integer.MAX_VALUE;
+ }
+
+ // Throw exception when the conditions are satisfied :
+ // 1). method equals to methodThrowingException
+ // 2). method calling count equals to what we want
+ private void throwExceptions(String methodName, ExceptionThrowingLeaseManagerMethods method)
+ throws DependencyException {
+ // Increase calling count for this method
+ leaseManagerMethodCallingCount[method.getIndex()]++;
+ if (method.equals(methodThrowingException)
+ && (leaseManagerMethodCallingCount[method.getIndex()] == timeThrowingException)) {
+ // Throw Dependency Exception if all conditions are satisfied.
+ LOG.debug("Throwing DependencyException in " + methodName);
+ throw new DependencyException(EXCEPTION_MSG);
+ }
+ }
+
+ @Override
+ public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity)
+ throws ProvisionedThroughputException, DependencyException {
+ throwExceptions("createLeaseTableIfNotExists",
+ ExceptionThrowingLeaseManagerMethods.CREATELEASETABLEIFNOTEXISTS);
+
+ return leaseManager.createLeaseTableIfNotExists(readCapacity, writeCapacity);
+ }
+
+ @Override
+ public boolean leaseTableExists() throws DependencyException {
+ throwExceptions("leaseTableExists", ExceptionThrowingLeaseManagerMethods.LEASETABLEEXISTS);
+
+ return leaseManager.leaseTableExists();
+ }
+
+ @Override
+ public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException {
+ throwExceptions("waitUntilLeaseTableExists", ExceptionThrowingLeaseManagerMethods.WAITUNTILLEASETABLEEXISTS);
+
+ return leaseManager.waitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds);
+ }
+
+ @Override
+ public List<KinesisClientLease> listLeases()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("listLeases", ExceptionThrowingLeaseManagerMethods.LISTLEASES);
+
+ return leaseManager.listLeases();
+ }
+
+ @Override
+ public boolean createLeaseIfNotExists(KinesisClientLease lease)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("createLeaseIfNotExists", ExceptionThrowingLeaseManagerMethods.CREATELEASEIFNOTEXISTS);
+
+ return leaseManager.createLeaseIfNotExists(lease);
+ }
+
+ @Override
+ public boolean renewLease(KinesisClientLease lease)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("renewLease", ExceptionThrowingLeaseManagerMethods.RENEWLEASE);
+
+ return leaseManager.renewLease(lease);
+ }
+
+ @Override
+ public boolean takeLease(KinesisClientLease lease, String owner)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("takeLease", ExceptionThrowingLeaseManagerMethods.TAKELEASE);
+
+ return leaseManager.takeLease(lease, owner);
+ }
+
+ @Override
+ public boolean evictLease(KinesisClientLease lease)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("evictLease", ExceptionThrowingLeaseManagerMethods.EVICTLEASE);
+
+ return leaseManager.evictLease(lease);
+ }
+
+ @Override
+ public void deleteLease(KinesisClientLease lease)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("deleteLease", ExceptionThrowingLeaseManagerMethods.DELETELEASE);
+
+ leaseManager.deleteLease(lease);
+ }
+
+ @Override
+ public boolean updateLease(KinesisClientLease lease)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("updateLease", ExceptionThrowingLeaseManagerMethods.UPDATELEASE);
+
+ return leaseManager.updateLease(lease);
+ }
+
+ @Override
+ public KinesisClientLease getLease(String shardId)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("getLease", ExceptionThrowingLeaseManagerMethods.GETLEASE);
+
+ return leaseManager.getLease(shardId);
+ }
+
+ @Override
+ public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ throwExceptions("deleteAll", ExceptionThrowingLeaseManagerMethods.DELETEALL);
+
+ leaseManager.deleteAll();
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfigurationTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfigurationTest.java
new file mode 100644
index 00000000..588c6d79
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfigurationTest.java
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.junit.Assert.assertEquals;
+import junit.framework.Assert;
+
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.regions.Region;
+import com.amazonaws.regions.RegionUtils;
+import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
+import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory;
+import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
+import com.google.common.collect.ImmutableSet;
+
+public class KinesisClientLibConfigurationTest {
+ private static final long INVALID_LONG = 0L;
+ private static final int INVALID_INT = 0;
+
+ private static final long TEST_VALUE_LONG = 1000L;
+ private static final int TEST_VALUE_INT = 1000;
+ private static final int PARAMETER_COUNT = 6;
+
+ private static final String TEST_STRING = "TestString";
+ private static final String ALTER_STRING = "AlterString";
+
+ // We don't want any of these tests to run checkpoint validation
+ private static final boolean skipCheckpointValidationValue = false;
+
+ @Test
+ public void testKCLConfigurationConstructorWithCorrectParamters() {
+ // Test the first two constructors with default values.
+ // All of them should be positive.
+ @SuppressWarnings("unused")
+ KinesisClientLibConfiguration config =
+ new KinesisClientLibConfiguration(TEST_STRING, TEST_STRING, null, TEST_STRING);
+
+ // Test constructor with all valid arguments.
+ config =
+ new KinesisClientLibConfiguration(TEST_STRING,
+ TEST_STRING,
+ TEST_STRING,
+ null,
+ null,
+ null,
+ null,
+ TEST_VALUE_LONG,
+ TEST_STRING,
+ TEST_VALUE_INT,
+ TEST_VALUE_LONG,
+ false,
+ TEST_VALUE_LONG,
+ TEST_VALUE_LONG,
+ true,
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ TEST_VALUE_LONG,
+ TEST_VALUE_LONG,
+ TEST_VALUE_INT,
+ skipCheckpointValidationValue,
+ null);
+ }
+
+ @Test
+ public void testKCLConfigurationConstructorWithInvalidParamter() {
+ // Test constructor with invalid parameters.
+ // Initialization should throw an error on invalid argument.
+ // Try each argument one at a time.
+ KinesisClientLibConfiguration config = null;
+ long[] longValues =
+ { TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG };
+ for (int i = 0; i < PARAMETER_COUNT; i++) {
+ longValues[i] = INVALID_LONG;
+ try {
+ config =
+ new KinesisClientLibConfiguration(TEST_STRING,
+ TEST_STRING,
+ TEST_STRING,
+ null,
+ null,
+ null,
+ null,
+ longValues[0],
+ TEST_STRING,
+ TEST_VALUE_INT,
+ longValues[1],
+ false,
+ longValues[2],
+ longValues[3],
+ true,
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ longValues[4],
+ longValues[5],
+ TEST_VALUE_INT,
+ skipCheckpointValidationValue,
+ null);
+ } catch (IllegalArgumentException e) {
+ System.out.println(e.getMessage());
+ }
+ longValues[i] = TEST_VALUE_LONG;
+ }
+ int[] intValues = { TEST_VALUE_INT, TEST_VALUE_INT };
+ for (int i = 0; i < 2; i++) {
+ intValues[i] = INVALID_INT;
+ try {
+ config =
+ new KinesisClientLibConfiguration(TEST_STRING,
+ TEST_STRING,
+ TEST_STRING,
+ null,
+ null,
+ null,
+ null,
+ TEST_VALUE_LONG,
+ TEST_STRING,
+ intValues[0],
+ TEST_VALUE_LONG,
+ false,
+ TEST_VALUE_LONG,
+ TEST_VALUE_LONG,
+ true,
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ TEST_VALUE_LONG,
+ TEST_VALUE_LONG,
+ intValues[1],
+ skipCheckpointValidationValue,
+ null);
+ } catch (IllegalArgumentException e) {
+ System.out.println(e.getMessage());
+ }
+ intValues[i] = TEST_VALUE_INT;
+ }
+ Assert.assertTrue("KCLConfiguration should return null when using negative arguments", config == null);
+ }
+
+ @Test
+ public void testKCLConfigurationUserAgent() {
+ // There are three ways of setting the user agent:
+ // 1) Use client configuration default user agent;
+ // 2) Pass client configurations;
+ // 3) Pass user agent.
+ // For each case, after building KCLConfiguration, KINESIS_CLIENT_LIB_USER_AGENT
+ // should be included in user agent.
+
+ // Default user agent should be "appName,KINESIS_CLIENT_LIB_USER_AGENT"
+ String expectedUserAgent = TEST_STRING + "," + KinesisClientLibConfiguration.KINESIS_CLIENT_LIB_USER_AGENT;
+ KinesisClientLibConfiguration config =
+ new KinesisClientLibConfiguration(TEST_STRING, TEST_STRING, null, TEST_STRING);
+ testContainingKCLUserAgent(config, expectedUserAgent);
+ ClientConfiguration clientConfig = new ClientConfiguration();
+ config.withCommonClientConfig(clientConfig);
+ testContainingKCLUserAgent(config, expectedUserAgent);
+
+ // Use alter string to replace app name in KCLConfiguration user agent.
+ expectedUserAgent = ALTER_STRING + "," + KinesisClientLibConfiguration.KINESIS_CLIENT_LIB_USER_AGENT;
+ clientConfig.setUserAgent(ALTER_STRING);
+ config.withCommonClientConfig(clientConfig);
+ testContainingKCLUserAgent(config, expectedUserAgent);
+ config.withUserAgent(ALTER_STRING);
+ testContainingKCLUserAgent(config, expectedUserAgent);
+ }
+
+ // Every aws client configuration in KCL configuration should contain expected user agent
+ private static void testContainingKCLUserAgent(KinesisClientLibConfiguration config, String expectedUserAgent) {
+ Assert.assertTrue("Kinesis client should contain expected User Agent", config.getKinesisClientConfiguration()
+ .getUserAgent()
+ .contains(expectedUserAgent));
+ Assert.assertTrue("DynamoDB client should contain expected User Agent", config.getDynamoDBClientConfiguration()
+ .getUserAgent()
+ .contains(expectedUserAgent));
+ Assert.assertTrue("CloudWatch client should contain expected User Agent",
+ config.getCloudWatchClientConfiguration().getUserAgent().contains(expectedUserAgent));
+ }
+
+ @Test
+ public void testKCLConfigurationWithOnlyRegionPropertyProvided() {
+ // test if the setRegion method has been called for each of the
+ // client once by setting only the region name
+ AmazonKinesisClient kclient = Mockito.mock(AmazonKinesisClient.class);
+ AmazonDynamoDBClient dclient = Mockito.mock(AmazonDynamoDBClient.class);
+ AmazonCloudWatchClient cclient = Mockito.mock(AmazonCloudWatchClient.class);
+ Region region = RegionUtils.getRegion("us-west-2");
+
+ AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
+ KinesisClientLibConfiguration kclConfig =
+ new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
+ .withRegionName("us-west-2");
+ IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class);
+ new Worker(processorFactory, kclConfig, kclient, dclient, cclient);
+
+ Mockito.verify(kclient, Mockito.times(1)).setRegion(region);
+ Mockito.verify(dclient, Mockito.times(1)).setRegion(region);
+ Mockito.verify(cclient, Mockito.times(1)).setRegion(region);
+ }
+
+ @Test
+ public void testKCLConfigurationWithBothRegionAndEndpointProvided() {
+ // test if the setRegion method has been called for each of the
+ // client once and setEndpoint has been called once for kinesis
+ // client by setting kinesis endpoint
+ AmazonKinesisClient kclient = Mockito.mock(AmazonKinesisClient.class);
+ AmazonDynamoDBClient dclient = Mockito.mock(AmazonDynamoDBClient.class);
+ AmazonCloudWatchClient cclient = Mockito.mock(AmazonCloudWatchClient.class);
+ Region region = RegionUtils.getRegion("us-west-2");
+
+ AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
+ KinesisClientLibConfiguration kclConfig =
+ new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
+ .withRegionName("us-west-2")
+ .withKinesisEndpoint("https://kinesis.eu-west-1.amazonaws.com");
+ IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class);
+ new Worker(processorFactory, kclConfig, kclient, dclient, cclient);
+
+ Mockito.verify(kclient, Mockito.times(1)).setRegion(region);
+ Mockito.verify(dclient, Mockito.times(1)).setRegion(region);
+ Mockito.verify(cclient, Mockito.times(1)).setRegion(region);
+ Mockito.verify(kclient, Mockito.times(1)).setEndpoint("https://kinesis.eu-west-1.amazonaws.com");
+ }
+
+ @Test
+ public void testKCLConfigurationWithSimplerWorkerConstructor() {
+ // Test the simpler worker constructor to see whether the region is set,
+ // by checking how many times getRegionName and getKinesisEndpoint
+ // have been called.
+ AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
+ KinesisClientLibConfiguration kclConfig = Mockito.spy(
+ new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
+ .withRegionName("us-west-2")
+ .withKinesisEndpoint("https://kinesis.eu-west-1.amazonaws.com"));
+
+ IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class);
+ new Worker(processorFactory, kclConfig);
+
+ Mockito.verify(kclConfig, Mockito.times(9)).getRegionName();
+ Mockito.verify(kclConfig, Mockito.times(4)).getKinesisEndpoint();
+
+ kclConfig = Mockito.spy(
+ new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
+ .withKinesisEndpoint("https://kinesis.eu-west-1.amazonaws.com"));
+
+ new Worker(processorFactory, kclConfig);
+
+ Mockito.verify(kclConfig, Mockito.times(3)).getRegionName();
+ Mockito.verify(kclConfig, Mockito.times(3)).getKinesisEndpoint();
+ }
+
+ @Test
+ public void testKCLConfigurationWithMultiRegionWithIlligalRegionName() {
+ // test with illegal region name
+ AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
+
+ KinesisClientLibConfiguration kclConfig =
+ new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0");
+ try {
+ kclConfig = kclConfig.withRegionName("abcd");
+ Assert.fail("No expected Exception is thrown.");
+ } catch (IllegalArgumentException e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ @Test
+    public void testKCLConfigurationWithMultiRegionWithIllegalRegionNameInFullConstructor() {
+ // test with illegal region name
+ Mockito.mock(AWSCredentialsProvider.class);
+ try {
+ new KinesisClientLibConfiguration(TEST_STRING,
+ TEST_STRING,
+ TEST_STRING,
+ null,
+ null,
+ null,
+ null,
+ TEST_VALUE_LONG,
+ TEST_STRING,
+ 3,
+ TEST_VALUE_LONG,
+ false,
+ TEST_VALUE_LONG,
+ TEST_VALUE_LONG,
+ true,
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ new ClientConfiguration(),
+ TEST_VALUE_LONG,
+ TEST_VALUE_LONG,
+ 1,
+ skipCheckpointValidationValue,
+ "abcd");
+ Assert.fail("No expected Exception is thrown.");
+ } catch(IllegalArgumentException e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ @Test
+ public void testKCLConfigurationMetricsDefaults() {
+ KinesisClientLibConfiguration config =
+ new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker");
+ // By default, metrics level should be detailed.
+ assertEquals(config.getMetricsLevel(), MetricsLevel.DETAILED);
+ // By default, only Operation and ShardId dimensions should be enabled.
+ assertEquals(config.getMetricsEnabledDimensions(), ImmutableSet.of("Operation", "ShardId"));
+ }
+
+ @Test
+ public void testKCLConfigurationWithMetricsLevel() {
+ KinesisClientLibConfiguration config =
+ new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker")
+ .withMetricsLevel("NONE");
+ assertEquals(config.getMetricsLevel(), MetricsLevel.NONE);
+ }
+
+ @Test
+ public void testKCLConfigurationWithMetricsEnabledDimensions() {
+ KinesisClientLibConfiguration config =
+ new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker")
+ .withMetricsEnabledDimensions(null);
+ // Operation dimension should always be there.
+ assertEquals(config.getMetricsEnabledDimensions(), ImmutableSet.of("Operation"));
+
+ config.withMetricsEnabledDimensions(ImmutableSet.of("WorkerIdentifier"));
+ // Operation dimension should always be there.
+ assertEquals(config.getMetricsEnabledDimensions(), ImmutableSet.of("Operation", "WorkerIdentifier"));
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorIntegrationTest.java
new file mode 100644
index 00000000..00c1310d
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorIntegrationTest.java
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
+import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
+import junit.framework.Assert;
+
+import org.junit.Before;
+import org.junit.Test;
+
+
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
+import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
+import com.amazonaws.services.kinesis.leases.impl.Lease;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer;
+
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+public class KinesisClientLibLeaseCoordinatorIntegrationTest {
+
+ private static KinesisClientLeaseManager leaseManager;
+ private KinesisClientLibLeaseCoordinator coordinator;
+ private static final String TABLE_NAME = KinesisClientLibLeaseCoordinatorIntegrationTest.class.getSimpleName();
+ private static final String WORKER_ID = UUID.randomUUID().toString();
+ private final String leaseKey = "shd-1";
+
+
+ @Before
+ public void setUp() throws ProvisionedThroughputException, DependencyException, InvalidStateException {
+ final boolean useConsistentReads = true;
+ if (leaseManager == null) {
+ AmazonDynamoDBClient ddb = new AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain());
+ leaseManager =
+ new KinesisClientLeaseManager(TABLE_NAME, ddb, useConsistentReads);
+ }
+ leaseManager.createLeaseTableIfNotExists(10L, 10L);
+ leaseManager.deleteAll();
+ coordinator = new KinesisClientLibLeaseCoordinator(leaseManager, WORKER_ID, 5000L, 50L);
+ coordinator.start();
+ }
+
+ /**
+ * Tests update checkpoint success.
+ */
+ @Test
+ public void testUpdateCheckpoint() throws LeasingException {
+ TestHarnessBuilder builder = new TestHarnessBuilder();
+ builder.withLease(leaseKey, null).build();
+
+ // Run the taker and renewer in-between getting the Lease object and calling setCheckpoint
+ coordinator.runLeaseTaker();
+ coordinator.runLeaseRenewer();
+
+ KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey);
+ if (lease == null) {
+            List<KinesisClientLease> leases = leaseManager.listLeases();
+ for (KinesisClientLease kinesisClientLease : leases) {
+ System.out.println(kinesisClientLease);
+ }
+ }
+
+ assertThat(lease, notNullValue());
+ ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint");
+ // lease's leaseCounter is wrong at this point, but it shouldn't matter.
+ Assert.assertTrue(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, lease.getConcurrencyToken()));
+
+ Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());
+
+ lease.setLeaseCounter(lease.getLeaseCounter() + 1);
+ lease.setCheckpoint(newCheckpoint);
+ lease.setLeaseOwner(coordinator.getWorkerIdentifier());
+ Assert.assertEquals(lease, fromDynamo);
+ }
+
+ /**
+ * Tests updateCheckpoint when the lease has changed out from under us.
+ */
+ @Test
+ public void testUpdateCheckpointLeaseUpdated() throws LeasingException {
+ TestHarnessBuilder builder = new TestHarnessBuilder();
+ builder.withLease(leaseKey, null).build();
+
+ coordinator.runLeaseTaker();
+ coordinator.runLeaseRenewer();
+ KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey);
+
+ assertThat(lease, notNullValue());
+ leaseManager.renewLease(coordinator.getCurrentlyHeldLease(leaseKey));
+
+ ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint");
+ Assert.assertFalse(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, lease.getConcurrencyToken()));
+
+ Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());
+
+ lease.setLeaseCounter(lease.getLeaseCounter() + 1);
+ // Counter and owner changed, but checkpoint did not.
+ lease.setLeaseOwner(coordinator.getWorkerIdentifier());
+ Assert.assertEquals(lease, fromDynamo);
+ }
+
+ /**
+ * Tests updateCheckpoint with a bad concurrency token.
+ */
+ @Test
+ public void testUpdateCheckpointBadConcurrencyToken() throws LeasingException {
+ TestHarnessBuilder builder = new TestHarnessBuilder();
+ builder.withLease(leaseKey, null).build();
+
+ coordinator.runLeaseTaker();
+ coordinator.runLeaseRenewer();
+ KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey);
+
+ assertThat(lease, notNullValue());
+
+ ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint");
+ Assert.assertFalse(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, UUID.randomUUID()));
+
+ Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());
+
+ // Owner should be the only thing that changed.
+ lease.setLeaseOwner(coordinator.getWorkerIdentifier());
+ Assert.assertEquals(lease, fromDynamo);
+ }
+
+ public static class TestHarnessBuilder {
+
+ private long currentTimeNanos;
+
+        private Map<String, KinesisClientLease> leases = new HashMap<String, KinesisClientLease>();
+
+        private Callable<Long> timeProvider = new Callable<Long>() {
+
+ @Override
+ public Long call() throws Exception {
+ return currentTimeNanos;
+ }
+
+ };
+
+ public TestHarnessBuilder withLease(String shardId) {
+ return withLease(shardId, "leaseOwner");
+ }
+
+ public TestHarnessBuilder withLease(String shardId, String owner) {
+ KinesisClientLease lease = new KinesisClientLease();
+ lease.setCheckpoint(new ExtendedSequenceNumber("checkpoint"));
+ lease.setOwnerSwitchesSinceCheckpoint(0L);
+ lease.setLeaseCounter(0L);
+ lease.setLeaseOwner(owner);
+ lease.setParentShardIds(Collections.singleton("parentShardId"));
+ lease.setLeaseKey(shardId);
+
+ leases.put(shardId, lease);
+ return this;
+ }
+
+        public Map<String, KinesisClientLease> build() throws LeasingException {
+ for (KinesisClientLease lease : leases.values()) {
+ leaseManager.createLeaseIfNotExists(lease);
+ if (lease.getLeaseOwner() != null) {
+ lease.setLastCounterIncrementNanos(System.nanoTime());
+ }
+ }
+
+ currentTimeNanos = System.nanoTime();
+
+ return leases;
+ }
+
+ public void passTime(long millis) {
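+            // Advance the simulated clock used by the time provider (milliseconds converted to nanoseconds).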
+ currentTimeNanos += millis * 1000000;
+ }
+
+ private void mutateAssert(String newWorkerIdentifier, KinesisClientLease original, KinesisClientLease actual) {
+ original.setLeaseCounter(original.getLeaseCounter() + 1);
+ if (original.getLeaseOwner() != null && !newWorkerIdentifier.equals(original.getLeaseOwner())) {
+ original.setOwnerSwitchesSinceCheckpoint(original.getOwnerSwitchesSinceCheckpoint() + 1);
+ }
+ original.setLeaseOwner(newWorkerIdentifier);
+
+ Assert.assertEquals(original, actual); // Assert the contents of the lease
+ }
+
+        public void addLeasesToRenew(ILeaseRenewer<KinesisClientLease> renewer, String... shardIds)
+ throws DependencyException, InvalidStateException {
+            List<KinesisClientLease> leasesToRenew = new ArrayList<KinesisClientLease>();
+
+ for (String shardId : shardIds) {
+ KinesisClientLease lease = leases.get(shardId);
+ Assert.assertNotNull(lease);
+ leasesToRenew.add(lease);
+ }
+
+ renewer.addLeasesToRenew(leasesToRenew);
+ }
+
+        public Map<String, KinesisClientLease> renewMutateAssert(ILeaseRenewer<KinesisClientLease> renewer,
+ String... renewedShardIds) throws DependencyException, InvalidStateException {
+ renewer.renewLeases();
+
+            Map<String, KinesisClientLease> heldLeases = renewer.getCurrentlyHeldLeases();
+ Assert.assertEquals(renewedShardIds.length, heldLeases.size());
+
+ for (String shardId : renewedShardIds) {
+ KinesisClientLease original = leases.get(shardId);
+ Assert.assertNotNull(original);
+
+ KinesisClientLease actual = heldLeases.get(shardId);
+ Assert.assertNotNull(actual);
+
+ original.setLeaseCounter(original.getLeaseCounter() + 1);
+ Assert.assertEquals(original, actual);
+ }
+
+ return heldLeases;
+ }
+
+ public void renewAllLeases() throws LeasingException {
+ for (KinesisClientLease lease : leases.values()) {
+ leaseManager.renewLease(lease);
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorTest.java
new file mode 100644
index 00000000..11962d8f
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorTest.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doReturn;
+
+import java.util.UUID;
+
+import junit.framework.Assert;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
+
+public class KinesisClientLibLeaseCoordinatorTest {
+ private static final String SHARD_ID = "shardId-test";
+ private static final String WORK_ID = "workId-test";
+ private static final long TEST_LONG = 1000L;
+ private static final ExtendedSequenceNumber TEST_CHKPT = new ExtendedSequenceNumber("string-test");
+ private static final UUID TEST_UUID = UUID.randomUUID();
+
+ @SuppressWarnings("rawtypes")
+ @Mock
+ private ILeaseManager mockLeaseManager;
+
+ private KinesisClientLibLeaseCoordinator leaseCoordinator;
+
+ @SuppressWarnings("unchecked")
+ @Before
+ public void setUpLeaseCoordinator() throws ProvisionedThroughputException, DependencyException {
+ // Initialize the annotation
+ MockitoAnnotations.initMocks(this);
+ // Set up lease coordinator
+ doReturn(true).when(mockLeaseManager).createLeaseTableIfNotExists(anyLong(), anyLong());
+ leaseCoordinator = new KinesisClientLibLeaseCoordinator(mockLeaseManager, WORK_ID, TEST_LONG, TEST_LONG);
+ }
+
+ @Test(expected = ShutdownException.class)
+ public void testSetCheckpointWithUnownedShardId()
+ throws KinesisClientLibException, DependencyException, InvalidStateException, ProvisionedThroughputException {
+        final boolean success = leaseCoordinator.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID);
+        Assert.assertFalse("Set checkpoint should return failure", success);
+ leaseCoordinator.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID.toString());
+ }
+
+ @Test(expected = DependencyException.class)
+ public void testWaitLeaseTableTimeout()
+ throws DependencyException, ProvisionedThroughputException, IllegalStateException {
+ // Set mock lease manager to return false in waiting
+ doReturn(false).when(mockLeaseManager).waitUntilLeaseTableExists(anyLong(), anyLong());
+ leaseCoordinator.initialize();
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcherTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcherTest.java
new file mode 100644
index 00000000..af9aedc5
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcherTest.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
+import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
+
+/**
+ * Unit tests for KinesisDataFetcher.
+ */
+public class KinesisDataFetcherTest {
+
+ private static final int MAX_RECORDS = 1;
+ private static final String SHARD_ID = "shardId-1";
+ private static final String AFTER_SEQUENCE_NUMBER = ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString();
+ private static final String AT_SEQUENCE_NUMBER = ShardIteratorType.AT_SEQUENCE_NUMBER.toString();
+ private static final ShardInfo SHARD_INFO = new ShardInfo(SHARD_ID, null, null);
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ MetricsHelper.startScope(new NullMetricsFactory(), "KinesisDataFetcherTest");
+ }
+
+ /**
+ * Test initialize() with the LATEST iterator instruction
+ */
+ @Test
+ public final void testInitializeLatest() throws Exception {
+ testInitializeAndFetch(ShardIteratorType.LATEST.toString(), ShardIteratorType.LATEST.toString());
+ }
+
+ /**
+     * Test initialize() with the TRIM_HORIZON iterator instruction
+ */
+ @Test
+ public final void testInitializeTimeZero() throws Exception {
+ testInitializeAndFetch(ShardIteratorType.TRIM_HORIZON.toString(), ShardIteratorType.TRIM_HORIZON.toString());
+ }
+
+ /**
+ * Test initialize() when a flushpoint exists.
+ */
+ @Test
+ public final void testInitializeFlushpoint() throws Exception {
+ testInitializeAndFetch("foo", "123");
+ }
+
+ /**
+ * Test initialize() with an invalid iterator instruction
+ */
+ @Test(expected = IllegalArgumentException.class)
+ public final void testInitializeInvalid() throws Exception {
+ testInitializeAndFetch("foo", null);
+ }
+
+ @Test
+ public void testadvanceIteratorTo() throws KinesisClientLibException {
+ IKinesisProxy kinesis = mock(IKinesisProxy.class);
+ ICheckpoint checkpoint = mock(ICheckpoint.class);
+
+ KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO);
+
+ String iteratorA = "foo";
+ String iteratorB = "bar";
+ String seqA = "123";
+ String seqB = "456";
+ GetRecordsResult outputA = new GetRecordsResult();
+        List<Record> recordsA = new ArrayList<Record>();
+ outputA.setRecords(recordsA);
+ GetRecordsResult outputB = new GetRecordsResult();
+        List<Record> recordsB = new ArrayList<Record>();
+ outputB.setRecords(recordsB);
+
+ when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqA)).thenReturn(iteratorA);
+ when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqB)).thenReturn(iteratorB);
+ when(kinesis.get(iteratorA, MAX_RECORDS)).thenReturn(outputA);
+ when(kinesis.get(iteratorB, MAX_RECORDS)).thenReturn(outputB);
+
+ when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqA));
+ fetcher.initialize(seqA);
+
+ fetcher.advanceIteratorTo(seqA);
+ Assert.assertEquals(recordsA, fetcher.getRecords(MAX_RECORDS).getRecords());
+
+ fetcher.advanceIteratorTo(seqB);
+ Assert.assertEquals(recordsB, fetcher.getRecords(MAX_RECORDS).getRecords());
+ }
+
+ @Test
+ public void testadvanceIteratorToTrimHorizonAndLatest() {
+ IKinesisProxy kinesis = mock(IKinesisProxy.class);
+
+ KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO);
+
+ String iteratorHorizon = "horizon";
+ when(kinesis.getIterator(SHARD_ID,
+ ShardIteratorType.TRIM_HORIZON.toString(), null)).thenReturn(iteratorHorizon);
+ fetcher.advanceIteratorTo(ShardIteratorType.TRIM_HORIZON.toString());
+ Assert.assertEquals(iteratorHorizon, fetcher.getNextIterator());
+
+ String iteratorLatest = "latest";
+ when(kinesis.getIterator(SHARD_ID, ShardIteratorType.LATEST.toString(), null)).thenReturn(iteratorLatest);
+ fetcher.advanceIteratorTo(ShardIteratorType.LATEST.toString());
+ Assert.assertEquals(iteratorLatest, fetcher.getNextIterator());
+ }
+
+ @Test
+ public void testGetRecordsWithResourceNotFoundException() {
+ // Set up arguments used by proxy
+ String nextIterator = "TestShardIterator";
+ int maxRecords = 100;
+
+ // Set up proxy mock methods
+ KinesisProxy mockProxy = mock(KinesisProxy.class);
+ doReturn(nextIterator).when(mockProxy).getIterator(SHARD_ID, ShardIteratorType.LATEST.toString(), null);
+ doThrow(new ResourceNotFoundException("Test Exception")).when(mockProxy).get(nextIterator, maxRecords);
+
+        // Create the data fetcher and initialize it with a LATEST checkpoint
+ KinesisDataFetcher dataFetcher = new KinesisDataFetcher(mockProxy, SHARD_INFO);
+ dataFetcher.initialize(SentinelCheckpoint.LATEST.toString());
+ // Call getRecords of dataFetcher which will throw an exception
+ dataFetcher.getRecords(maxRecords);
+
+ // Test shard has reached the end
+ Assert.assertTrue("Shard should reach the end", dataFetcher.isShardEndReached());
+ }
+
+ private void testInitializeAndFetch(String iteratorType, String seqNo) throws Exception {
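+        // Stub the proxy so both the iterator-type and at-sequence-number paths return the same iterator,
+        // then verify that initialize() followed by getRecords() yields the stubbed records.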
+ IKinesisProxy kinesis = mock(IKinesisProxy.class);
+ String iterator = "foo";
+        List<Record> expectedRecords = new ArrayList<Record>();
+ GetRecordsResult response = new GetRecordsResult();
+ response.setRecords(expectedRecords);
+
+
+ when(kinesis.getIterator(SHARD_ID, iteratorType, null)).thenReturn(iterator);
+ when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqNo)).thenReturn(iterator);
+ when(kinesis.get(iterator, MAX_RECORDS)).thenReturn(response);
+
+ ICheckpoint checkpoint = mock(ICheckpoint.class);
+ when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqNo));
+
+ KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO);
+
+ fetcher.initialize(seqNo);
+        List<Record> actualRecords = fetcher.getRecords(MAX_RECORDS).getRecords();
+
+ Assert.assertEquals(expectedRecords, actualRecords);
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTaskTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTaskTest.java
new file mode 100644
index 00000000..5385d05e
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTaskTest.java
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.clientlibrary.types.Messages.AggregatedRecord;
+import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
+import com.amazonaws.services.kinesis.model.Record;
+import com.google.protobuf.ByteString;
+
+public class ProcessTaskTest {
+
+ @SuppressWarnings("serial")
+ private static class RecordSubclass extends Record {}
+
+ private static final byte[] TEST_DATA = new byte[] { 1, 2, 3, 4 };
+
+ private final int maxRecords = 100;
+ private final String shardId = "shard-test";
+ private final long idleTimeMillis = 1000L;
+ private final long taskBackoffTimeMillis = 1L;
+ private final boolean callProcessRecordsForEmptyRecordList = true;
+ // We don't want any of these tests to run checkpoint validation
+ private final boolean skipCheckpointValidationValue = false;
+ private final InitialPositionInStream initialPositionInStream = InitialPositionInStream.LATEST;
+
+ private @Mock KinesisDataFetcher mockDataFetcher;
+ private @Mock IRecordProcessor mockRecordProcessor;
+ private @Mock RecordProcessorCheckpointer mockCheckpointer;
+
+    private List<Record> processedRecords;
+ private ExtendedSequenceNumber newLargestPermittedCheckpointValue;
+
+ private ProcessTask processTask;
+
+ @Before
+ public void setUpProcessTask() {
+ // Initialize the annotation
+ MockitoAnnotations.initMocks(this);
+ // Set up process task
+ final StreamConfig config =
+ new StreamConfig(null, maxRecords, idleTimeMillis, callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue, initialPositionInStream);
+ final ShardInfo shardInfo = new ShardInfo(shardId, null, null);
+ processTask = new ProcessTask(
+ shardInfo, config, mockRecordProcessor, mockCheckpointer, mockDataFetcher, taskBackoffTimeMillis);
+ }
+
+ @Test
+ public void testProcessTaskWithProvisionedThroughputExceededException() {
+ // Set data fetcher to throw exception
+ doReturn(false).when(mockDataFetcher).isShardEndReached();
+ doThrow(new ProvisionedThroughputExceededException("Test Exception")).when(mockDataFetcher)
+ .getRecords(maxRecords);
+
+ TaskResult result = processTask.call();
+ assertTrue("Result should contain ProvisionedThroughputExceededException",
+ result.getException() instanceof ProvisionedThroughputExceededException);
+ }
+
+ @Test
+ public void testProcessTaskWithNonExistentStream() {
+ // Data fetcher returns a null Result when the stream does not exist
+ doReturn(null).when(mockDataFetcher).getRecords(maxRecords);
+
+ TaskResult result = processTask.call();
+ assertNull("Task should not throw an exception", result.getException());
+ }
+
+ @Test
+ public void testProcessTaskWithShardEndReached() {
+ // Set data fetcher to return true for shard end reached
+ doReturn(true).when(mockDataFetcher).isShardEndReached();
+
+ TaskResult result = processTask.call();
+ assertTrue("Result should contain shardEndReached true", result.isShardEndReached());
+ }
+
+ @Test
+ public void testNonAggregatedKinesisRecord() {
+ final String sqn = new BigInteger(128, new Random()).toString();
+ final String pk = UUID.randomUUID().toString();
+ final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS));
+ final Record r = new Record()
+ .withPartitionKey(pk)
+ .withData(ByteBuffer.wrap(TEST_DATA))
+ .withSequenceNumber(sqn)
+ .withApproximateArrivalTimestamp(ts);
+
+ testWithRecord(r);
+
+ assertEquals(1, processedRecords.size());
+
+ Record pr = processedRecords.get(0);
+ assertEquals(pk, pr.getPartitionKey());
+ assertEquals(ts, pr.getApproximateArrivalTimestamp());
+ byte[] b = new byte[pr.getData().remaining()];
+ pr.getData().get(b);
+ assertTrue(Arrays.equals(TEST_DATA, b));
+
+ assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber());
+ assertEquals(0, newLargestPermittedCheckpointValue.getSubSequenceNumber());
+ }
+
+ @Test
+ public void testDoesNotDeaggregateSubclassOfRecord() {
+ final String sqn = new BigInteger(128, new Random()).toString();
+ final Record r = new RecordSubclass()
+ .withSequenceNumber(sqn)
+ .withData(ByteBuffer.wrap(new byte[0]));
+
+ testWithRecord(r);
+
+        assertEquals(1, processedRecords.size());
+ assertSame(r, processedRecords.get(0));
+
+ assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber());
+ assertEquals(0, newLargestPermittedCheckpointValue.getSubSequenceNumber());
+ }
+
+ @Test
+ public void testDeaggregatesRecord() {
+ final String sqn = new BigInteger(128, new Random()).toString();
+ final String pk = UUID.randomUUID().toString();
+ final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS));
+ final Record r = new Record()
+ .withPartitionKey("-")
+ .withData(generateAggregatedRecord(pk))
+ .withSequenceNumber(sqn)
+ .withApproximateArrivalTimestamp(ts);
+
+ testWithRecord(r);
+
+ assertEquals(3, processedRecords.size());
+ for (Record pr : processedRecords) {
+ assertTrue(pr instanceof UserRecord);
+ assertEquals(pk, pr.getPartitionKey());
+ assertEquals(ts, pr.getApproximateArrivalTimestamp());
+ byte[] b = new byte[pr.getData().remaining()];
+ pr.getData().get(b);
+ assertTrue(Arrays.equals(TEST_DATA, b));
+ }
+
+ assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber());
+ assertEquals(processedRecords.size() - 1, newLargestPermittedCheckpointValue.getSubSequenceNumber());
+ }
+
+ @Test
+ public void testDeaggregatesRecordWithNoArrivalTimestamp() {
+ final String sqn = new BigInteger(128, new Random()).toString();
+ final String pk = UUID.randomUUID().toString();
+ final Record r = new Record()
+ .withPartitionKey("-")
+ .withData(generateAggregatedRecord(pk))
+ .withSequenceNumber(sqn);
+
+ testWithRecord(r);
+
+ assertEquals(3, processedRecords.size());
+ for (Record pr : processedRecords) {
+ assertTrue(pr instanceof UserRecord);
+ assertEquals(pk, pr.getPartitionKey());
+ assertNull(pr.getApproximateArrivalTimestamp());
+ }
+ }
+
+ @Test
+ public void testLargestPermittedCheckpointValue() {
+ // Some sequence number value from previous processRecords call to mock.
+ final BigInteger previousCheckpointSqn = new BigInteger(128, new Random());
+
+ // Values for this processRecords call.
+ final int numberOfRecords = 104;
+        // Start this batch of records at a sequence number greater than the previous checkpoint value.
+ final BigInteger startingSqn = previousCheckpointSqn.add(BigInteger.valueOf(10));
+        final List<Record> records = generateConsecutiveRecords(
+ numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), new Date(), startingSqn);
+
+ testWithRecords(records, new ExtendedSequenceNumber(previousCheckpointSqn.toString()),
+ new ExtendedSequenceNumber(previousCheckpointSqn.toString()));
+
+ final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber(
+ startingSqn.add(BigInteger.valueOf(numberOfRecords - 1)).toString());
+ assertEquals(expectedLargestPermittedEsqn, newLargestPermittedCheckpointValue);
+ }
+
+ @Test
+ public void testLargestPermittedCheckpointValueWithEmptyRecords() {
+ // Some sequence number value from previous processRecords call.
+ final BigInteger baseSqn = new BigInteger(128, new Random());
+ final ExtendedSequenceNumber lastCheckpointEspn = new ExtendedSequenceNumber(baseSqn.toString());
+ final ExtendedSequenceNumber largestPermittedEsqn = new ExtendedSequenceNumber(
+ baseSqn.add(BigInteger.valueOf(100)).toString());
+
+        testWithRecords(Collections.<Record>emptyList(), lastCheckpointEspn, largestPermittedEsqn);
+
+ // Make sure that even with empty records, largest permitted sequence number does not change.
+ assertEquals(largestPermittedEsqn, newLargestPermittedCheckpointValue);
+ }
+
+ @Test
+ public void testFilterBasedOnLastCheckpointValue() {
+ // Explanation of setup:
+        // * Assume in the previous processRecords call, the user got 3 sub-records that all belonged to one
+        //   Kinesis record. So sequence number was X, and sub-sequence numbers were 0, 1, 2.
+        // * 2nd sub-record was checkpointed (extended sequence number X.1).
+ // * Worker crashed and restarted. So now DDB has checkpoint value of X.1.
+ // Test:
+ // * Now in the subsequent processRecords call, KCL should filter out X.0 and X.1.
+ final BigInteger previousCheckpointSqn = new BigInteger(128, new Random());
+ final long previousCheckpointSsqn = 1;
+
+ // Values for this processRecords call.
+ final String startingSqn = previousCheckpointSqn.toString();
+ final String pk = UUID.randomUUID().toString();
+ final Record r = new Record()
+ .withPartitionKey("-")
+ .withData(generateAggregatedRecord(pk))
+ .withSequenceNumber(startingSqn);
+
+ testWithRecords(Collections.singletonList(r),
+ new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn),
+ new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn));
+
+        // First two sub-records should be dropped - only the one remaining record should be present.
+ assertEquals(1, processedRecords.size());
+ assertTrue(processedRecords.get(0) instanceof UserRecord);
+
+ // Verify user record's extended sequence number and other fields.
+ final UserRecord pr = (UserRecord)processedRecords.get(0);
+ assertEquals(pk, pr.getPartitionKey());
+ assertEquals(startingSqn, pr.getSequenceNumber());
+ assertEquals(previousCheckpointSsqn + 1, pr.getSubSequenceNumber());
+ assertNull(pr.getApproximateArrivalTimestamp());
+
+ // Expected largest permitted sequence number will be last sub-record sequence number.
+ final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber(
+ previousCheckpointSqn.toString(), 2L);
+ assertEquals(expectedLargestPermittedEsqn, newLargestPermittedCheckpointValue);
+ }
+
+ private void testWithRecord(Record record) {
+ testWithRecords(Collections.singletonList(record),
+ ExtendedSequenceNumber.TRIM_HORIZON, ExtendedSequenceNumber.TRIM_HORIZON);
+ }
+
+    private void testWithRecords(List<Record> records,
+ ExtendedSequenceNumber lastCheckpointValue,
+ ExtendedSequenceNumber largestPermittedCheckpointValue) {
+ when(mockDataFetcher.getRecords(anyInt())).thenReturn(
+ new GetRecordsResult().withRecords(records));
+ when(mockCheckpointer.getLastCheckpointValue()).thenReturn(lastCheckpointValue);
+ when(mockCheckpointer.getLargestPermittedCheckpointValue()).thenReturn(largestPermittedCheckpointValue);
+ processTask.call();
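+        // Capture the records handed to the record processor and the largest permitted checkpoint value set afterwards.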
+
+        ArgumentCaptor<ProcessRecordsInput> priCaptor = ArgumentCaptor.forClass(ProcessRecordsInput.class);
+ verify(mockRecordProcessor).processRecords(priCaptor.capture());
+ processedRecords = priCaptor.getValue().getRecords();
+
+        ArgumentCaptor<ExtendedSequenceNumber> esnCaptor = ArgumentCaptor.forClass(ExtendedSequenceNumber.class);
+ verify(mockCheckpointer).setLargestPermittedCheckpointValue(esnCaptor.capture());
+ newLargestPermittedCheckpointValue = esnCaptor.getValue();
+ }
+
+ /**
+ * See the KPL documentation on GitHub for more details about the binary
+ * format.
+ *
+ * @param pk
+ * Partition key to use. All the records will have the same
+ * partition key.
+ * @return ByteBuffer containing the serialized form of the aggregated
+ * record, along with the necessary header and footer.
+ */
+ private static ByteBuffer generateAggregatedRecord(String pk) {
+ ByteBuffer bb = ByteBuffer.allocate(1024);
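+        // Header: the 4-byte magic number that marks a KPL aggregated record.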
+ bb.put(new byte[] {-13, -119, -102, -62 });
+
+ com.amazonaws.services.kinesis.clientlibrary.types.Messages.Record r =
+ com.amazonaws.services.kinesis.clientlibrary.types.Messages.Record.newBuilder()
+ .setData(ByteString.copyFrom(TEST_DATA))
+ .setPartitionKeyIndex(0)
+ .build();
+
+ byte[] payload = AggregatedRecord.newBuilder()
+ .addPartitionKeyTable(pk)
+ .addRecords(r)
+ .addRecords(r)
+ .addRecords(r)
+ .build()
+ .toByteArray();
+
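+        // Body and footer: the protobuf-encoded AggregatedRecord payload followed by its MD5 digest.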
+ bb.put(payload);
+ bb.put(md5(payload));
+ bb.limit(bb.position());
+ bb.rewind();
+ return bb;
+ }
+
+    private static List<Record> generateConsecutiveRecords(
+ int numberOfRecords, String partitionKey, ByteBuffer data,
+ Date arrivalTimestamp, BigInteger startSequenceNumber) {
+        List<Record> records = new ArrayList<>();
+ for (int i = 0 ; i < numberOfRecords ; ++i) {
+ records.add(new Record()
+ .withPartitionKey(partitionKey)
+ .withData(data)
+ .withSequenceNumber(startSequenceNumber.add(BigInteger.valueOf(i)).toString())
+ .withApproximateArrivalTimestamp(arrivalTimestamp));
+ }
+ return records;
+ }
+
+ private static byte[] md5(byte[] b) {
+ try {
+ MessageDigest md = MessageDigest.getInstance("MD5");
+ return md.digest(b);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointerTest.java
new file mode 100644
index 00000000..4741ea14
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointerTest.java
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.InMemoryCheckpointImpl;
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import com.amazonaws.services.kinesis.model.Record;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Matchers.anyString;
+
+/**
+ *
+ */
+public class RecordProcessorCheckpointerTest {
+ private String startingSequenceNumber = "13";
+ private ExtendedSequenceNumber startingExtendedSequenceNumber = new ExtendedSequenceNumber(startingSequenceNumber);
+ private String testConcurrencyToken = "testToken";
+ private ICheckpoint checkpoint;
+ private String shardId = "shardId-123";
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ checkpoint = new InMemoryCheckpointImpl(startingSequenceNumber);
+ // A real checkpoint will return a checkpoint value after it is initialized.
+ checkpoint.setCheckpoint(shardId, startingExtendedSequenceNumber, testConcurrencyToken);
+ Assert.assertEquals(this.startingExtendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ /**
+ * Test method for
+ * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint()}.
+ */
+ @Test
+ public final void testCheckpoint() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+
+ // First call to checkpoint
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, null);
+ processingCheckpointer.setLargestPermittedCheckpointValue(startingExtendedSequenceNumber);
+ processingCheckpointer.checkpoint();
+ Assert.assertEquals(startingExtendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+
+ // Advance checkpoint
+ ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019");
+
+ processingCheckpointer.setLargestPermittedCheckpointValue(sequenceNumber);
+ processingCheckpointer.checkpoint();
+ Assert.assertEquals(sequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+ /**
+ * Test method for
+ * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(Record record)}.
+ */
+ @Test
+ public final void testCheckpointRecord() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+ SequenceNumberValidator sequenceNumberValidator =
+ new SequenceNumberValidator(null, shardId, false);
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
+ processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025");
+ Record record = new Record().withSequenceNumber("5025");
+ processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
+ processingCheckpointer.checkpoint(record);
+ Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+ /**
+ * Test method for
+ * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(UserRecord record)}.
+ */
+ @Test
+ public final void testCheckpointSubRecord() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+ SequenceNumberValidator sequenceNumberValidator =
+ new SequenceNumberValidator(null, shardId, false);
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
+ processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030");
+ Record record = new Record().withSequenceNumber("5030");
+ UserRecord subRecord = new UserRecord(record);
+ processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
+ processingCheckpointer.checkpoint(subRecord);
+ Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+ /**
+ * Test method for
+ * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(String sequenceNumber)}.
+ */
+ @Test
+ public final void testCheckpointSequenceNumber() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+ SequenceNumberValidator sequenceNumberValidator =
+ new SequenceNumberValidator(null, shardId, false);
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
+ processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035");
+ processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
+ processingCheckpointer.checkpoint("5035");
+ Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+ /**
+ * Test method for
+ * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}.
+ */
+ @Test
+ public final void testCheckpointExtendedSequenceNumber() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+ SequenceNumberValidator sequenceNumberValidator =
+ new SequenceNumberValidator(null, shardId, false);
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
+ processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
+ ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040");
+ processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
+ processingCheckpointer.checkpoint("5040", 0);
+ Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
+ }
+
+ /**
+ * Test method for update()
+ *
+ */
+ @Test
+ public final void testUpdate() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+
+ RecordProcessorCheckpointer checkpointer = new RecordProcessorCheckpointer(shardInfo, checkpoint, null);
+
+ ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("10");
+ checkpointer.setLargestPermittedCheckpointValue(sequenceNumber);
+ Assert.assertEquals(sequenceNumber, checkpointer.getLargestPermittedCheckpointValue());
+
+ sequenceNumber = new ExtendedSequenceNumber("90259185948592875928375908214918273491783097");
+ checkpointer.setLargestPermittedCheckpointValue(sequenceNumber);
+ Assert.assertEquals(sequenceNumber, checkpointer.getLargestPermittedCheckpointValue());
+ }
+
+ /*
+     * This test mixes basic checks of checkpointing at a sequence number with verification that the checkpointer
+     * performs the bounds checks and validations that prevent clients from checkpointing out-of-order, too-large,
+     * or non-numeric sequence number values.
+ */
+ @Test
+ public final void testClientSpecifiedCheckpoint() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+
+ SequenceNumberValidator validator = mock(SequenceNumberValidator.class);
+ Mockito.doNothing().when(validator).validateSequenceNumber(anyString());
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, validator);
+
+        // Several checkpoint values we are going to exercise
+ ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2");
+ ExtendedSequenceNumber firstSequenceNumber = checkpoint.getCheckpoint(shardId); // 13
+ ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("127");
+ ExtendedSequenceNumber thirdSequenceNumber = new ExtendedSequenceNumber("5019");
+ ExtendedSequenceNumber lastSequenceNumberOfShard = new ExtendedSequenceNumber("6789");
+ ExtendedSequenceNumber tooBigSequenceNumber = new ExtendedSequenceNumber("9000");
+
+ processingCheckpointer.setInitialCheckpointValue(firstSequenceNumber);
+ processingCheckpointer.setLargestPermittedCheckpointValue(thirdSequenceNumber);
+
+ // confirm that we cannot move backward
+ try {
+ processingCheckpointer.checkpoint(tooSmall.getSequenceNumber(), tooSmall.getSubSequenceNumber());
+ Assert.fail("You shouldn't be able to checkpoint earlier than the initial checkpoint.");
+ } catch (IllegalArgumentException e) {
+            // Expected: checkpointing before the initial checkpoint value is rejected.
+ }
+
+ // advance to first
+ processingCheckpointer.checkpoint(firstSequenceNumber.getSequenceNumber(), firstSequenceNumber.getSubSequenceNumber());
+ Assert.assertEquals(firstSequenceNumber, checkpoint.getCheckpoint(shardId));
+ processingCheckpointer.checkpoint(firstSequenceNumber.getSequenceNumber(), firstSequenceNumber.getSubSequenceNumber());
+ Assert.assertEquals(firstSequenceNumber, checkpoint.getCheckpoint(shardId));
+
+ // advance to second
+ processingCheckpointer.checkpoint(secondSequenceNumber.getSequenceNumber(), secondSequenceNumber.getSubSequenceNumber());
+ Assert.assertEquals(secondSequenceNumber, checkpoint.getCheckpoint(shardId));
+
+ ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt =
+ { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed
+ firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number
+ tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer
+ lastSequenceNumberOfShard, // Just another big value that we will use later
+ null, // Not a valid sequence number
+ new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string
+ ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max
+ ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value
+ ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value
+ };
+ for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) {
+ try {
+ processingCheckpointer.checkpoint(badCheckpointValue.getSequenceNumber(), badCheckpointValue.getSubSequenceNumber());
+ fail("checkpointing at bad or out of order sequence didn't throw exception");
+ } catch (IllegalArgumentException e) {
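+                // Expected: out-of-order, too-large, or otherwise invalid sequence numbers are rejected.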
+
+ } catch (NullPointerException e) {
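+                // Expected: a null sequence number is rejected.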
+
+ }
+ Assert.assertEquals("Checkpoint value should not have changed",
+ secondSequenceNumber,
+ checkpoint.getCheckpoint(shardId));
+ Assert.assertEquals("Last checkpoint value should not have changed",
+ secondSequenceNumber,
+ processingCheckpointer.getLastCheckpointValue());
+ Assert.assertEquals("Largest sequence number should not have changed",
+ thirdSequenceNumber,
+ processingCheckpointer.getLargestPermittedCheckpointValue());
+ }
+
+ // advance to third number
+ processingCheckpointer.checkpoint(thirdSequenceNumber.getSequenceNumber(), thirdSequenceNumber.getSubSequenceNumber());
+ Assert.assertEquals(thirdSequenceNumber, checkpoint.getCheckpoint(shardId));
+
+ // Testing a feature that prevents checkpointing at SHARD_END twice
+ processingCheckpointer.setLargestPermittedCheckpointValue(lastSequenceNumberOfShard);
+ processingCheckpointer.setSequenceNumberAtShardEnd(processingCheckpointer.getLargestPermittedCheckpointValue());
+ processingCheckpointer.setLargestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END);
+ processingCheckpointer.checkpoint(lastSequenceNumberOfShard.getSequenceNumber(), lastSequenceNumberOfShard.getSubSequenceNumber());
+ Assert.assertEquals("Checkpoing at the sequence number at the end of a shard should be the same as "
+ + "checkpointing at SHARD_END",
+ ExtendedSequenceNumber.SHARD_END,
+ processingCheckpointer.getLastCheckpointValue());
+ }
+
+ private enum CheckpointAction {
+ NONE, NO_SEQUENCE_NUMBER, WITH_SEQUENCE_NUMBER;
+ }
+
+ /**
+ * Tests a bunch of mixed calls between checkpoint() and checkpoint(sequenceNumber) using a helper function.
+ *
+ * Also covers an edge case scenario where a shard consumer is started on a shard that never receives any records
+     * and is then shut down.
+ *
+ * @throws Exception
+ */
+ @SuppressWarnings("serial")
+ @Test
+ public final void testMixedCheckpointCalls() throws Exception {
+ ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
+
+ SequenceNumberValidator validator = mock(SequenceNumberValidator.class);
+ Mockito.doNothing().when(validator).validateSequenceNumber(anyString());
+
+ RecordProcessorCheckpointer processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, validator);
+
+        List<LinkedHashMap<String, CheckpointAction>> testPlans =
+                new ArrayList<LinkedHashMap<String, CheckpointAction>>();
+
+ /*
+ * Simulate a scenario where the checkpointer is created at "latest".
+ *
+ * Then the processor is called with no records (e.g. no more records are added, but the processor might be
+ * called just to allow checkpointing).
+ *
+ * Then the processor is shutdown.
+ */
+        testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
+ {
+ put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
+ put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
+ }
+ });
+ // Nearly the same as the previous test, but we don't call checkpoint after LATEST
+        testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
+ {
+ put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NONE);
+ put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
+ }
+ });
+
+ // Start with TRIM_HORIZON
+        testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
+ {
+ put(SentinelCheckpoint.TRIM_HORIZON.toString(), CheckpointAction.NONE);
+ put("1", CheckpointAction.NONE);
+ put("2", CheckpointAction.NO_SEQUENCE_NUMBER);
+ put("3", CheckpointAction.NONE);
+ put("4", CheckpointAction.WITH_SEQUENCE_NUMBER);
+ put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
+ }
+ });
+
+ // Start with LATEST and a bit more complexity
+        testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
+ {
+ put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
+ put("30", CheckpointAction.NONE);
+ put("332", CheckpointAction.WITH_SEQUENCE_NUMBER);
+ put("349", CheckpointAction.NONE);
+ put("4332", CheckpointAction.NO_SEQUENCE_NUMBER);
+ put("4338", CheckpointAction.NONE);
+ put("5349", CheckpointAction.WITH_SEQUENCE_NUMBER);
+ put("5358", CheckpointAction.NONE);
+ put("64332", CheckpointAction.NO_SEQUENCE_NUMBER);
+ put("64338", CheckpointAction.NO_SEQUENCE_NUMBER);
+ put("65358", CheckpointAction.WITH_SEQUENCE_NUMBER);
+ put("764338", CheckpointAction.WITH_SEQUENCE_NUMBER);
+ put("765349", CheckpointAction.NO_SEQUENCE_NUMBER);
+ put("765358", CheckpointAction.NONE);
+ put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
+ }
+ });
+
+        for (LinkedHashMap<String, CheckpointAction> testPlan : testPlans) {
+ processingCheckpointer =
+ new RecordProcessorCheckpointer(shardInfo, checkpoint, validator);
+ testMixedCheckpointCalls(processingCheckpointer, testPlan);
+ }
+ }
+
+ /**
+ * A utility function to simplify various sequences of intermixed updates to the checkpointer, and calls to
+ * checpoint() and checkpoint(sequenceNumber). Takes a map where the key is a new sequence number to set in the
+ * checkpointer and the value is a CheckpointAction indicating an action to take: NONE -> Set the sequence number,
+ * don't do anything else NO_SEQUENCE_NUMBER -> Set the sequence number and call checkpoint() WITH_SEQUENCE_NUMBER
+ * -> Set the sequence number and call checkpoint(sequenceNumber) with that sequence number
+ *
+ * @param processingCheckpointer
+ * @param checkpointValueAndAction
+ * A map describing which checkpoint value to set in the checkpointer, and what action to take
+ * @throws Exception
+ */
+ private void testMixedCheckpointCalls(RecordProcessorCheckpointer processingCheckpointer,
+            LinkedHashMap<String, CheckpointAction> checkpointValueAndAction) throws Exception {
+
+        for (Entry<String, CheckpointAction> entry : checkpointValueAndAction.entrySet()) {
+ ExtendedSequenceNumber lastCheckpointValue = processingCheckpointer.getLastCheckpointValue();
+
+ if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) {
+ // Before shard end, we will pretend to do what we expect the shutdown task to do
+ processingCheckpointer.setSequenceNumberAtShardEnd(processingCheckpointer
+ .getLargestPermittedCheckpointValue());
+ }
+ // Advance the largest checkpoint and check that it is updated.
+ processingCheckpointer.setLargestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey()));
+ Assert.assertEquals("Expected the largest checkpoint value to be updated after setting it",
+ new ExtendedSequenceNumber(entry.getKey()),
+ processingCheckpointer.getLargestPermittedCheckpointValue());
+ switch (entry.getValue()) {
+ case NONE:
+                // We were told not to checkpoint, so let's just make sure the last checkpoint value is the same as
+                // when this block started, then continue to the next instruction
+ Assert.assertEquals("Expected the last checkpoint value to stay the same if we didn't checkpoint",
+ lastCheckpointValue,
+ processingCheckpointer.getLastCheckpointValue());
+ continue;
+ case NO_SEQUENCE_NUMBER:
+ processingCheckpointer.checkpoint();
+ break;
+ case WITH_SEQUENCE_NUMBER:
+ processingCheckpointer.checkpoint(entry.getKey());
+ break;
+ }
+ // We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date
+ Assert.assertEquals("Expected the last checkpoint value to change after checkpointing",
+ new ExtendedSequenceNumber(entry.getKey()),
+ processingCheckpointer.getLastCheckpointValue());
+ Assert.assertEquals("Expected the largest checkpoint value to remain the same since the last set",
+ new ExtendedSequenceNumber(entry.getKey()),
+ processingCheckpointer.getLargestPermittedCheckpointValue());
+ }
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidatorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidatorTest.java
new file mode 100644
index 00000000..ce222f9e
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidatorTest.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import junit.framework.Assert;
+
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.fail;
+
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.model.InvalidArgumentException;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+
+public class SequenceNumberValidatorTest {
+
+ private final boolean validateWithGetIterator = true;
+ private final String shardId = "shardid-123";
+
+ @Test
+ public final void testSequenceNumberValidator() {
+
+ IKinesisProxy proxy = Mockito.mock(IKinesisProxy.class);
+
+ SequenceNumberValidator validator = new SequenceNumberValidator(proxy, shardId, validateWithGetIterator);
+
+ String goodSequence = "456";
+ String iterator = "happyiterator";
+ String badSequence = "789";
+ Mockito.doReturn(iterator)
+ .when(proxy)
+ .getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), goodSequence);
+ Mockito.doThrow(new InvalidArgumentException(""))
+ .when(proxy)
+ .getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), badSequence);
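+ // With these stubs the proxy accepts the good sequence number and throws InvalidArgumentException for the
+ // bad one, which the validator is expected to surface as an IllegalArgumentException.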
+
+ validator.validateSequenceNumber(goodSequence);
+ Mockito.verify(proxy, Mockito.times(1)).getIterator(shardId,
+ ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
+ goodSequence);
+
+ try {
+ validator.validateSequenceNumber(badSequence);
+ fail("Bad sequence number did not cause the validator to throw an exception");
+ } catch (IllegalArgumentException e) {
+ Mockito.verify(proxy, Mockito.times(1)).getIterator(shardId,
+ ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
+ badSequence);
+ }
+
+ nonNumericValueValidationTest(validator, proxy, validateWithGetIterator);
+ }
+
+ @Test
+ public final void testNoValidation() {
+ IKinesisProxy proxy = Mockito.mock(IKinesisProxy.class);
+ String shardId = "shardid-123";
+ SequenceNumberValidator validator = new SequenceNumberValidator(proxy, shardId, !validateWithGetIterator);
+ String goodSequence = "456";
+
+ // Just checking that the false flag for validating against getIterator is honored
+ validator.validateSequenceNumber(goodSequence);
+ Mockito.verify(proxy, Mockito.times(0)).getIterator(shardId,
+ ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
+ goodSequence);
+
+ // Validator should still validate sentinel values
+ nonNumericValueValidationTest(validator, proxy, !validateWithGetIterator);
+ }
+
+ private void nonNumericValueValidationTest(SequenceNumberValidator validator,
+ IKinesisProxy proxy,
+ boolean validateWithGetIterator) {
+
+ String[] nonNumericStrings =
+ { null, "bogus-sequence-number", SentinelCheckpoint.LATEST.toString(),
+ SentinelCheckpoint.SHARD_END.toString(), SentinelCheckpoint.TRIM_HORIZON.toString() };
+
+ for (String nonNumericString : nonNumericStrings) {
+ try {
+ validator.validateSequenceNumber(nonNumericString);
+ fail("Validator should not consider " + nonNumericString + " a valid sequence number");
+ } catch (IllegalArgumentException e) {
+ // Non-numeric strings should always be rejected by the validator before the proxy can be called so we
+ // check that the proxy was not called at all
+ Mockito.verify(proxy, Mockito.times(0)).getIterator(shardId,
+ ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
+ nonNumericString);
+ }
+ }
+ }
+
+ @Test
+ public final void testIsDigits() {
+ // Check things that are all digits
+ String[] stringsOfDigits = {
+ "0",
+ "12",
+ "07897803434",
+ "12324456576788",
+ };
+ for (String digits : stringsOfDigits) {
+ Assert.assertTrue("Expected that " + digits + " would be considered a string of digits.",
+ SequenceNumberValidator.isDigits(digits));
+ }
+ // Check things that are not all digits
+ String[] stringsWithNonDigits = {
+ null,
+ "",
+ " ", // white spaces
+ "6 4",
+ "\t45",
+ "5242354235234\n",
+ "7\n6\n5\n",
+ "12s", // last character
+ "c07897803434", // first character
+ "1232445wef6576788", // interior
+ "no-digits",
+ };
+ for (String notAllDigits : stringsWithNonDigits) {
+ Assert.assertFalse("Expected that " + notAllDigits + " would not be considered a string of digits.",
+ SequenceNumberValidator.isDigits(notAllDigits));
+ }
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerTest.java
new file mode 100644
index 00000000..27f8f13c
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerTest.java
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Test;
+
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
+import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.InMemoryCheckpointImpl;
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShardConsumer.ShardConsumerState;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
+import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
+import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
+import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+
+/**
+ * Unit tests of {@link ShardConsumer}.
+ */
+public class ShardConsumerTest {
+
+ private static final Log LOG = LogFactory.getLog(ShardConsumerTest.class);
+
+ private final IMetricsFactory metricsFactory = new NullMetricsFactory();
+ private final boolean callProcessRecordsForEmptyRecordList = false;
+ private final long taskBackoffTimeMillis = 500L;
+ private final long parentShardPollIntervalMillis = 50L;
+ private final boolean cleanupLeasesOfCompletedShards = true;
+ // We don't want any of these tests to run checkpoint validation
+ private final boolean skipCheckpointValidationValue = false;
+ private final InitialPositionInStream initialPositionInStream = InitialPositionInStream.LATEST;
+
+ // Use Executors.newFixedThreadPool since it returns ThreadPoolExecutor, which is
+ // ... a non-final public class, and so can be mocked and spied.
+ private final ExecutorService executorService = Executors.newFixedThreadPool(1);
+
+ /**
+ * Test method to verify consumer stays in INITIALIZING state when InitializationTask fails.
+ */
+ @SuppressWarnings("unchecked")
+ @Test
+ public final void testInitializationStateUponFailure() throws Exception {
+ ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null);
+ ICheckpoint checkpoint = mock(ICheckpoint.class);
+
+ when(checkpoint.getCheckpoint(anyString())).thenThrow(NullPointerException.class);
+ IRecordProcessor processor = mock(IRecordProcessor.class);
+ IKinesisProxy streamProxy = mock(IKinesisProxy.class);
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(anyString())).thenReturn(null);
+ StreamConfig streamConfig =
+ new StreamConfig(streamProxy,
+ 1,
+ 10,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+
+ ShardConsumer consumer =
+ new ShardConsumer(shardInfo,
+ streamConfig,
+ checkpoint,
+ processor,
+ null,
+ parentShardPollIntervalMillis,
+ cleanupLeasesOfCompletedShards,
+ executorService,
+ metricsFactory,
+ taskBackoffTimeMillis);
+
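+ // getCheckpoint() throws, so once the parent-shard check completes the consumer should reach
+ // INITIALIZING and stay there across repeated consumeShard() calls.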
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ }
+
+
+ /**
+ * Test method to verify consumer stays in INITIALIZING state when InitializationTask fails.
+ */
+ @SuppressWarnings("unchecked")
+ @Test
+ public final void testInitializationStateUponSubmissionFailure() throws Exception {
+ ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null);
+ ICheckpoint checkpoint = mock(ICheckpoint.class);
+ ExecutorService spyExecutorService = spy(executorService);
+
+ when(checkpoint.getCheckpoint(anyString())).thenThrow(NullPointerException.class);
+ IRecordProcessor processor = mock(IRecordProcessor.class);
+ IKinesisProxy streamProxy = mock(IKinesisProxy.class);
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(anyString())).thenReturn(null);
+ StreamConfig streamConfig =
+ new StreamConfig(streamProxy,
+ 1,
+ 10,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+
+ ShardConsumer consumer =
+ new ShardConsumer(shardInfo,
+ streamConfig,
+ checkpoint,
+ processor,
+ null,
+ parentShardPollIntervalMillis,
+ cleanupLeasesOfCompletedShards,
+ spyExecutorService,
+ metricsFactory,
+ taskBackoffTimeMillis);
+
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+
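+ // Make the executor reject the InitializeTask submission; the consumer should still move to INITIALIZING
+ // but never progress past it.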
+ doThrow(new RejectedExecutionException()).when(spyExecutorService).submit(any(InitializeTask.class));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public final void testRecordProcessorThrowable() throws Exception {
+ ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null);
+ ICheckpoint checkpoint = mock(ICheckpoint.class);
+ IRecordProcessor processor = mock(IRecordProcessor.class);
+ IKinesisProxy streamProxy = mock(IKinesisProxy.class);
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ StreamConfig streamConfig =
+ new StreamConfig(streamProxy,
+ 1,
+ 10,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+
+ ShardConsumer consumer =
+ new ShardConsumer(shardInfo,
+ streamConfig,
+ checkpoint,
+ processor,
+ null,
+ parentShardPollIntervalMillis,
+ cleanupLeasesOfCompletedShards,
+ executorService,
+ metricsFactory,
+ taskBackoffTimeMillis);
+
+ when(leaseManager.getLease(anyString())).thenReturn(null);
+ when(checkpoint.getCheckpoint(anyString())).thenReturn(new ExtendedSequenceNumber("123"));
+
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+ consumer.consumeShard(); // submit BlockOnParentShardTask
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+ verify(processor, times(0)).initialize(any(InitializationInput.class));
+
+ // Throw Error when IRecordProcessor.initialize() is invoked.
+ doThrow(new Error("ThrowableTest")).when(processor).initialize(any(InitializationInput.class));
+
+ consumer.consumeShard(); // submit InitializeTask
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ verify(processor, times(1)).initialize(any(InitializationInput.class));
+
+ try {
+ // Checking the status of submitted InitializeTask from above should throw exception.
+ consumer.consumeShard();
+ fail("ShardConsumer should have thrown exception.");
+ } catch (RuntimeException e) {
+ assertThat(e.getCause(), instanceOf(ExecutionException.class));
+ }
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ verify(processor, times(1)).initialize(any(InitializationInput.class));
+
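+ // Let initialization succeed on the next attempt so the consumer can eventually advance to PROCESSING.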
+ doNothing().when(processor).initialize(any(InitializationInput.class));
+
+ consumer.consumeShard(); // submit InitializeTask again.
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ verify(processor, times(2)).initialize(any(InitializationInput.class));
+
+ // Checking the status of submitted InitializeTask from above should pass.
+ consumer.consumeShard();
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.PROCESSING)));
+ }
+
+ /**
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShardConsumer#consumeShard()}
+ */
+ @Test
+ public final void testConsumeShard() throws Exception {
+ int numRecs = 10;
+ BigInteger startSeqNum = BigInteger.ONE;
+ String streamShardId = "kinesis-0-0";
+ String testConcurrencyToken = "testToken";
+ File file =
+ KinesisLocalFileDataCreator.generateTempDataFile(1,
+ "kinesis-0-",
+ numRecs,
+ startSeqNum,
+ "unitTestSCT001");
+
+ IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());
+
+ final int maxRecords = 2;
+ final int idleTimeMS = 0; // keep unit tests fast
+ ICheckpoint checkpoint = new InMemoryCheckpointImpl(startSeqNum.toString());
+ checkpoint.setCheckpoint(streamShardId, ExtendedSequenceNumber.TRIM_HORIZON, testConcurrencyToken);
+ @SuppressWarnings("unchecked")
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseManager.getLease(anyString())).thenReturn(null);
+
+ TestStreamlet processor = new TestStreamlet();
+
+ StreamConfig streamConfig =
+ new StreamConfig(fileBasedProxy,
+ maxRecords,
+ idleTimeMS,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+
+ ShardInfo shardInfo = new ShardInfo(streamShardId, testConcurrencyToken, null);
+ ShardConsumer consumer =
+ new ShardConsumer(shardInfo,
+ streamConfig,
+ checkpoint,
+ processor,
+ leaseManager,
+ parentShardPollIntervalMillis,
+ cleanupLeasesOfCompletedShards,
+ executorService,
+ metricsFactory,
+ taskBackoffTimeMillis);
+
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
+ consumer.consumeShard(); // check on parent shards
+ Thread.sleep(50L);
+ consumer.consumeShard(); // start initialization
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
+ consumer.consumeShard(); // initialize
+ Thread.sleep(50L);
+
+ // We expect to process all records in numRecs calls
+ for (int i = 0; i < numRecs;) {
+ boolean newTaskSubmitted = consumer.consumeShard();
+ if (newTaskSubmitted) {
+ LOG.debug("New processing task was submitted, call # " + i);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.PROCESSING)));
+ // CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES
+ i += maxRecords;
+ }
+ Thread.sleep(50L);
+ }
+
+ assertThat(processor.getShutdownReason(), nullValue());
+ consumer.beginShutdown();
+ Thread.sleep(50L);
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.SHUTTING_DOWN)));
+ consumer.beginShutdown();
+ assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.SHUTDOWN_COMPLETE)));
+ assertThat(processor.getShutdownReason(), is(equalTo(ShutdownReason.ZOMBIE)));
+
+ executorService.shutdown();
+ executorService.awaitTermination(60, TimeUnit.SECONDS);
+
+ String iterator = fileBasedProxy.getIterator(streamShardId, ShardIteratorType.TRIM_HORIZON.toString(), null);
+ List<Record> expectedRecords = toUserRecords(fileBasedProxy.get(iterator, numRecs).getRecords());
+ verifyConsumedRecords(expectedRecords, processor.getProcessedRecords());
+ file.delete();
+ }
+
+ //@formatter:off (gets the formatting wrong)
+ private void verifyConsumedRecords(List<Record> expectedRecords,
+ List<Record> actualRecords) {
+ //@formatter:on
+ assertThat(actualRecords.size(), is(equalTo(expectedRecords.size())));
+ ListIterator<Record> expectedIter = expectedRecords.listIterator();
+ ListIterator<Record> actualIter = actualRecords.listIterator();
+ for (int i = 0; i < expectedRecords.size(); ++i) {
+ assertThat(actualIter.next(), is(equalTo(expectedIter.next())));
+ }
+ }
+
+ private List<Record> toUserRecords(List<Record> records) {
+ if (records == null || records.isEmpty()) {
+ return records;
+ }
+ List<Record> userRecords = new ArrayList<Record>();
+ for (Record record : records) {
+ userRecords.add(new UserRecord(record));
+ }
+ return userRecords;
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfoTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfoTest.java
new file mode 100644
index 00000000..d62d880d
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfoTest.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+
+import junit.framework.Assert;
+
+import org.junit.Before;
+import org.junit.Test;
+
+public class ShardInfoTest {
+ private static final String CONCURRENCY_TOKEN = UUID.randomUUID().toString();
+ private static final String SHARD_ID = "shardId-test";
+ private final Set<String> parentShardIds = new HashSet<>();
+ private ShardInfo testShardInfo;
+
+ @Before
+ public void setUpPacboyShardInfo() {
+ // Add parent shard Ids
+ parentShardIds.add("shard-1");
+ parentShardIds.add("shard-2");
+
+ testShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds);
+ }
+
+ @Test
+ public void testPacboyShardInfoEqualsWithSameArgs() {
+ ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds);
+ Assert.assertTrue("Equal should return true for arguments all the same", testShardInfo.equals(equalShardInfo));
+ }
+
+ @Test
+ public void testPacboyShardInfoEqualsWithNull() {
+ Assert.assertFalse("Equal should return false when object is null", testShardInfo.equals(null));
+ }
+
+ @Test
+ public void testPacboyShardInfoEqualsForShardId() {
+ ShardInfo diffShardInfo = new ShardInfo("shardId-diff", CONCURRENCY_TOKEN, parentShardIds);
+ Assert.assertFalse("Equal should return false with different shard id", diffShardInfo.equals(testShardInfo));
+ diffShardInfo = new ShardInfo(null, CONCURRENCY_TOKEN, parentShardIds);
+ Assert.assertFalse("Equal should return false with null shard id", diffShardInfo.equals(testShardInfo));
+ }
+
+ @Test
+ public void testPacboyShardInfoEqualsForToken() {
+ ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds);
+ Assert.assertFalse("Equal should return false with different concurrency token",
+ diffShardInfo.equals(testShardInfo));
+ diffShardInfo = new ShardInfo(SHARD_ID, null, parentShardIds);
+ Assert.assertFalse("Equal should return false for null concurrency token", diffShardInfo.equals(testShardInfo));
+ }
+
+ @Test
+ public void testPacboyShardInfoEqualsForDifferentlyOrderedParentIds() {
+ List<String> differentlyOrderedParentShardIds = new ArrayList<>();
+ differentlyOrderedParentShardIds.add("shard-2");
+ differentlyOrderedParentShardIds.add("shard-1");
+ ShardInfo shardInfoWithDifferentlyOrderedParentShardIds =
+ new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds);
+ Assert.assertTrue("Equal should return true even with parent shard Ids reordered",
+ shardInfoWithDifferentlyOrderedParentShardIds.equals(testShardInfo));
+ }
+
+ @Test
+ public void testPacboyShardInfoEqualsForParentIds() {
+ Set<String> diffParentIds = new HashSet<>();
+ diffParentIds.add("shard-3");
+ diffParentIds.add("shard-4");
+ ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds);
+ Assert.assertFalse("Equal should return false with different parent shard Ids",
+ diffShardInfo.equals(testShardInfo));
+ diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, null);
+ Assert.assertFalse("Equal should return false with null parent shard Ids", diffShardInfo.equals(testShardInfo));
+ }
+
+ @Test
+ public void testPacboyShardInfoSameHashCode() {
+ ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds);
+ Assert.assertTrue("Shard info objects should have same hashCode for the same arguments",
+ equalShardInfo.hashCode() == testShardInfo.hashCode());
+ }
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardObjectHelper.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardObjectHelper.java
new file mode 100644
index 00000000..f154119a
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardObjectHelper.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
+
+/**
+ * Helper class to create Shard, SequenceRange and related objects.
+ */
+class ShardObjectHelper {
+
+ private static final int EXPONENT = 128;
+
+ /**
+ * Max value of a sequence number (2^128 -1). Useful for defining sequence number range for a shard.
+ */
+ static final String MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString();
+
+ /**
+ * Min value of a sequence number (0). Useful for defining sequence number range for a shard.
+ */
+ static final String MIN_SEQUENCE_NUMBER = BigInteger.ZERO.toString();
+
+ /**
+ * Max value of a hash key (2^128 -1). Useful for defining hash key range for a shard.
+ */
+ static final String MAX_HASH_KEY = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString();
+
+ /**
+ * Min value of a hash key (0). Useful for defining hash key range for a shard.
+ */
+ public static final String MIN_HASH_KEY = BigInteger.ZERO.toString();
+
+ /**
+ * Private constructor for this static helper class.
+ */
+ private ShardObjectHelper() {
+ }
+
+
+ /** Helper method to create a new shard object.
+ * @param shardId
+ * @param parentShardId
+ * @param adjacentParentShardId
+ * @param sequenceNumberRange
+ * @return
+ */
+ static Shard newShard(String shardId,
+ String parentShardId,
+ String adjacentParentShardId,
+ SequenceNumberRange sequenceNumberRange) {
+ return newShard(shardId, parentShardId, adjacentParentShardId, sequenceNumberRange, null);
+ }
+
+ /** Helper method to create a new shard object.
+ * @param shardId
+ * @param parentShardId
+ * @param adjacentParentShardId
+ * @param sequenceNumberRange
+ * @param hashKeyRange
+ * @return
+ */
+ static Shard newShard(String shardId,
+ String parentShardId,
+ String adjacentParentShardId,
+ SequenceNumberRange sequenceNumberRange,
+ HashKeyRange hashKeyRange) {
+ Shard shard = new Shard();
+ shard.setShardId(shardId);
+ shard.setParentShardId(parentShardId);
+ shard.setAdjacentParentShardId(adjacentParentShardId);
+ shard.setSequenceNumberRange(sequenceNumberRange);
+ shard.setHashKeyRange(hashKeyRange);
+
+ return shard;
+ }
+
+ /** Helper method.
+ * @param startingSequenceNumber
+ * @param endingSequenceNumber
+ * @return
+ */
+ static SequenceNumberRange newSequenceNumberRange(String startingSequenceNumber, String endingSequenceNumber) {
+ SequenceNumberRange range = new SequenceNumberRange();
+ range.setStartingSequenceNumber(startingSequenceNumber);
+ range.setEndingSequenceNumber(endingSequenceNumber);
+ return range;
+ }
+
+ /** Helper method.
+ * @param startingHashKey
+ * @param endingHashKey
+ * @return
+ */
+ static HashKeyRange newHashKeyRange(String startingHashKey, String endingHashKey) {
+ HashKeyRange range = new HashKeyRange();
+ range.setStartingHashKey(startingHashKey);
+ range.setEndingHashKey(endingHashKey);
+ return range;
+ }
+
+ static List<String> getParentShardIds(Shard shard) {
+ List<String> parentShardIds = new ArrayList<>(2);
+ if (shard.getAdjacentParentShardId() != null) {
+ parentShardIds.add(shard.getAdjacentParentShardId());
+ }
+ if (shard.getParentShardId() != null) {
+ parentShardIds.add(shard.getParentShardId());
+ }
+ return parentShardIds;
+ }
+
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSequenceVerifier.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSequenceVerifier.java
new file mode 100644
index 00000000..5ad42359
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSequenceVerifier.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentSkipListSet;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.amazonaws.services.kinesis.model.Shard;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
+
+/**
+ * Helper class to verify shard lineage in unit tests that use TestStreamlet.
+ * Verifies that parent shard processors were shutdown before child shard processor was initialized.
+ */
+class ShardSequenceVerifier {
+
+ private static final Log LOG = LogFactory.getLog(ShardSequenceVerifier.class);
+ private Map<String, Shard> shardIdToShards = new HashMap<String, Shard>();
+ private ConcurrentSkipListSet<String> initializedShards = new ConcurrentSkipListSet<>();
+ private ConcurrentSkipListSet<String> shutdownShards = new ConcurrentSkipListSet<>();
+ private List<String> validationFailures = Collections.synchronizedList(new ArrayList<String>());
+
+ /**
+ * Constructor with the shard list for the stream.
+ */
+ ShardSequenceVerifier(List<Shard> shardList) {
+ for (Shard shard : shardList) {
+ shardIdToShards.put(shard.getShardId(), shard);
+ }
+ }
+
+ void registerInitialization(String shardId) {
+ List<String> parentShardIds = ShardObjectHelper.getParentShardIds(shardIdToShards.get(shardId));
+ for (String parentShardId : parentShardIds) {
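+ // Only parents that this test actually processed (initialized) must have been shut down before the child starts.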
+ if (initializedShards.contains(parentShardId)) {
+ if (!shutdownShards.contains(parentShardId)) {
+ String message = "Parent shard " + parentShardId + " was not shutdown before shard "
+ + shardId + " was initialized.";
+ LOG.error(message);
+ validationFailures.add(message);
+ }
+ }
+ }
+ initializedShards.add(shardId);
+ }
+
+ void registerShutdown(String shardId, ShutdownReason reason) {
+ if (reason.equals(ShutdownReason.TERMINATE)) {
+ shutdownShards.add(shardId);
+ }
+ }
+
+ void verify() {
+ for (String message : validationFailures) {
+ LOG.error(message);
+ }
+ Assert.assertTrue(validationFailures.isEmpty());
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskIntegrationTest.java
new file mode 100644
index 00000000..6843efbd
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskIntegrationTest.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
+import com.amazonaws.services.kinesis.AmazonKinesis;
+import com.amazonaws.services.kinesis.AmazonKinesisClient;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy;
+import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
+import com.amazonaws.services.kinesis.leases.interfaces.IKinesisClientLeaseManager;
+import com.amazonaws.services.kinesis.model.StreamStatus;
+
+/**
+ * WARN: to run this integration test you'll have to provide an AwsCredentials.properties file on the classpath.
+ */
+public class ShardSyncTaskIntegrationTest {
+
+ private static final String STREAM_NAME = "IntegrationTestStream02";
+ private static final String KINESIS_ENDPOINT = "https://kinesis.us-east-1.amazonaws.com";
+
+ private static AWSCredentialsProvider credentialsProvider;
+ private IKinesisClientLeaseManager leaseManager;
+ private IKinesisProxy kinesisProxy;
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ credentialsProvider = new DefaultAWSCredentialsProviderChain();
+ AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider);
+
+ try {
+ kinesis.createStream(STREAM_NAME, 1);
+ } catch (AmazonServiceException ase) {
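+ // The stream may already exist; ignore the error and wait below for it to become ACTIVE.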
+
+ }
+ StreamStatus status;
+ do {
+ status = StreamStatus.fromValue(kinesis.describeStream(STREAM_NAME).getStreamDescription().getStreamStatus());
+ } while (status != StreamStatus.ACTIVE);
+
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ boolean useConsistentReads = true;
+ leaseManager =
+ new KinesisClientLeaseManager("ShardSyncTaskIntegrationTest",
+ new AmazonDynamoDBClient(credentialsProvider),
+ useConsistentReads);
+ kinesisProxy =
+ new KinesisProxy(STREAM_NAME,
+ new DefaultAWSCredentialsProviderChain(),
+ KINESIS_ENDPOINT);
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ /**
+ * Test method for call().
+ *
+ * @throws CapacityExceededException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ */
+ @Test
+ public final void testCall() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ if (!leaseManager.leaseTableExists()) {
+ final Long readCapacity = 10L;
+ final Long writeCapacity = 10L;
+ leaseManager.createLeaseTableIfNotExists(readCapacity, writeCapacity);
+ }
+ leaseManager.deleteAll();
+ Set<String> shardIds = kinesisProxy.getAllShardIds();
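+ // Run a shard sync against the live stream; afterwards every shard should have a corresponding lease.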
+ ShardSyncTask syncTask =
+ new ShardSyncTask(kinesisProxy, leaseManager, InitialPositionInStream.LATEST, false, 0L);
+ syncTask.call();
+ List<KinesisClientLease> leases = leaseManager.listLeases();
+ Set<String> leaseKeys = new HashSet<String>();
+ for (KinesisClientLease lease : leases) {
+ leaseKeys.add(lease.getLeaseKey());
+ }
+
+ // Verify that all shardIds had leases for them
+ Assert.assertEquals(shardIds.size(), leases.size());
+ shardIds.removeAll(leaseKeys);
+ Assert.assertTrue(shardIds.isEmpty());
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncerTest.java
new file mode 100644
index 00000000..f02943b4
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncerTest.java
@@ -0,0 +1,1418 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.io.File;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException;
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ExceptionThrowingLeaseManager.ExceptionThrowingLeaseManagerMethods;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
+import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
+import com.amazonaws.services.kinesis.leases.impl.LeaseManager;
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
+
+import junit.framework.Assert;
+
+/**
+ *
+ */
+// CHECKSTYLE:IGNORE JavaNCSS FOR NEXT 800 LINES
+public class ShardSyncerTest {
+ private static final Log LOG = LogFactory.getLog(ShardSyncer.class);
+ private final InitialPositionInStream latestPosition = InitialPositionInStream.LATEST;
+ private final boolean cleanupLeasesOfCompletedShards = true;
+ AmazonDynamoDB ddbClient = DynamoDBEmbedded.create();
+ LeaseManager<KinesisClientLease> leaseManager = new KinesisClientLeaseManager("tempTestTable", ddbClient);
+ private static final int EXPONENT = 128;
+ /**
+ * Old/Obsolete max value of a sequence number (2^128 -1).
+ */
+ public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ boolean created = leaseManager.createLeaseTableIfNotExists(1L, 1L);
+ if (created) {
+ LOG.info("New table created.");
+ }
+ leaseManager.deleteAll();
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @After
+ public void tearDown() throws Exception {
+ leaseManager.deleteAll();
+ }
+
+ /**
+ * Test determineNewLeasesToCreate() where there are no shards
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateNoShards() {
+ List<Shard> shards = new ArrayList<Shard>();
+ List<KinesisClientLease> leases = new ArrayList<KinesisClientLease>();
+
+ Assert.assertTrue(
+ ShardSyncer.determineNewLeasesToCreate(shards, leases, InitialPositionInStream.LATEST).isEmpty());
+ }
+
+ /**
+ * Test determineNewLeasesToCreate() where there are no leases and no resharding operations have been performed
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreate0Leases0Reshards() {
+ List<Shard> shards = new ArrayList<Shard>();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+ SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
+
+ String shardId0 = "shardId-0";
+ shards.add(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange));
+
+ String shardId1 = "shardId-1";
+ shards.add(ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
+
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.LATEST);
+ Assert.assertEquals(2, newLeases.size());
+ Set<String> expectedLeaseShardIds = new HashSet<String>();
+ expectedLeaseShardIds.add(shardId0);
+ expectedLeaseShardIds.add(shardId1);
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue(expectedLeaseShardIds.contains(lease.getLeaseKey()));
+ }
+ }
+
+ /**
+ * Test bootstrapShardLeases() starting at TRIM_HORIZON ("beginning" of stream)
+ *
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ * @throws IOException
+ * @throws KinesisClientLibIOException
+ */
+ @Test
+ public final void testBootstrapShardLeasesAtTrimHorizon()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException,
+ KinesisClientLibIOException {
+ testBootstrapShardLeasesAtStartingPosition(InitialPositionInStream.TRIM_HORIZON);
+ }
+
+ /**
+ * Test bootstrapShardLeases() starting at LATEST (tip of stream)
+ *
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ * @throws IOException
+ * @throws KinesisClientLibIOException
+ */
+ @Test
+ public final void testBootstrapShardLeasesAtLatest()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException,
+ KinesisClientLibIOException {
+ testBootstrapShardLeasesAtStartingPosition(InitialPositionInStream.LATEST);
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test
+ public final void testCheckAndCreateLeasesForNewShardsAtLatest()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ List<Shard> shards = constructShardListForGraphA();
+ File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1");
+ dataFile.deleteOnExit();
+ IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath());
+
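+ // With initial position LATEST, leases should be created only for the open shards of the graph
+ // (shardId-4, shardId-8, shardId-9, shardId-10), as asserted below.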
+ ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy,
+ leaseManager,
+ InitialPositionInStream.LATEST,
+ cleanupLeasesOfCompletedShards);
+ List<KinesisClientLease> newLeases = leaseManager.listLeases();
+ Set<String> expectedLeaseShardIds = new HashSet<String>();
+ expectedLeaseShardIds.add("shardId-4");
+ expectedLeaseShardIds.add("shardId-8");
+ expectedLeaseShardIds.add("shardId-9");
+ expectedLeaseShardIds.add("shardId-10");
+ Assert.assertEquals(expectedLeaseShardIds.size(), newLeases.size());
+ for (KinesisClientLease lease1 : newLeases) {
+ Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey()));
+ Assert.assertEquals(ExtendedSequenceNumber.LATEST, lease1.getCheckpoint());
+ }
+ dataFile.delete();
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test
+ public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizon()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ List<Shard> shards = constructShardListForGraphA();
+ File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1");
+ dataFile.deleteOnExit();
+ IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath());
+
+ ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy,
+ leaseManager,
+ InitialPositionInStream.TRIM_HORIZON,
+ cleanupLeasesOfCompletedShards);
+ List<KinesisClientLease> newLeases = leaseManager.listLeases();
+ Set<String> expectedLeaseShardIds = new HashSet<String>();
+ for (int i = 0; i < 11; i++) {
+ expectedLeaseShardIds.add("shardId-" + i);
+ }
+ Assert.assertEquals(expectedLeaseShardIds.size(), newLeases.size());
+ for (KinesisClientLease lease1 : newLeases) {
+ Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey()));
+ Assert.assertEquals(ExtendedSequenceNumber.TRIM_HORIZON, lease1.getCheckpoint());
+ }
+ dataFile.delete();
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testCheckAndCreateLeasesForNewShardsWhenParentIsOpen()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ List<Shard> shards = constructShardListForGraphA();
+ SequenceNumberRange range = shards.get(0).getSequenceNumberRange();
+ range.setEndingSequenceNumber(null);
+ shards.get(3).setSequenceNumberRange(range);
+ File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1");
+ dataFile.deleteOnExit();
+ IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath());
+
+ ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy,
+ leaseManager,
+ InitialPositionInStream.TRIM_HORIZON,
+ cleanupLeasesOfCompletedShards);
+ dataFile.delete();
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test
+ public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShard()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardImpl(null, Integer.MAX_VALUE);
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test
+ public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithDeleteLeaseExceptions()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ // Define the maximum number of calls to the lease manager method under test.
+ // Based on the shard graph, it could be called up to 10 times.
+ int maxCallingCount = 10;
+ for (int c = 1; c <= maxCallingCount; c = c + 2) {
+ testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardImpl(
+ ExceptionThrowingLeaseManagerMethods.DELETELEASE, c);
+ // Need to clean up lease manager every time after calling ShardSyncer
+ leaseManager.deleteAll();
+ }
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test
+ public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithListLeasesExceptions()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ // Define the maximum number of calls to the lease manager method under test.
+ // Based on the shard graph, it could be called up to 10 times.
+ int maxCallingCount = 10;
+ for (int c = 1; c <= maxCallingCount; c = c + 2) {
+ testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardImpl(
+ ExceptionThrowingLeaseManagerMethods.LISTLEASES, c);
+ // Need to clean up lease manager every time after calling ShardSyncer
+ leaseManager.deleteAll();
+ }
+ }
+
+ /**
+ * @throws KinesisClientLibIOException
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ * @throws IOException
+ */
+ @Test
+ public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithCreateLeaseExceptions()
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ // Define the maximum number of calls to the lease manager method under test.
+ // Based on the shard graph, it could be called up to 10 times.
+ int maxCallingCount = 5;
+ for (int c = 1; c <= maxCallingCount; c = c + 2) {
+ testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardImpl(
+ ExceptionThrowingLeaseManagerMethods.CREATELEASEIFNOTEXISTS, c);
+ // Need to clean up lease manager every time after calling ShardSyncer
+ leaseManager.deleteAll();
+ }
+ }
+
+ // Try to catch the leasing exception thrown by different lease manager methods and eventually let the sync succeed.
+ // This does not throw any exceptions if:
+ // 1) exceptionMethod is null or NONE, or
+ // 2) exceptionTime is very large or negative.
+ private void retryCheckAndCreateLeaseForNewShards(IKinesisProxy kinesisProxy,
+ ExceptionThrowingLeaseManagerMethods exceptionMethod,
+ int exceptionTime)
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException {
+ if (exceptionMethod != null) {
+ ExceptionThrowingLeaseManager exceptionThrowingLeaseManager =
+ new ExceptionThrowingLeaseManager(leaseManager);
+ // Set exception and throwing time for exceptionThrowingManager.
+ exceptionThrowingLeaseManager.setLeaseLeaseManagerThrowingExceptionScenario(exceptionMethod, exceptionTime);
+ // Only need to try two times.
+ for (int i = 1; i <= 2; i++) {
+ try {
+ ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy,
+ exceptionThrowingLeaseManager,
+ InitialPositionInStream.TRIM_HORIZON,
+ cleanupLeasesOfCompletedShards);
+ return;
+ } catch (LeasingException e) {
+ LOG.debug("Catch leasing exception", e);
+ }
+ // Clear throwing exception scenario every time after calling ShardSyncer
+ exceptionThrowingLeaseManager.clearLeaseManagerThrowingExceptionScenario();
+ }
+ } else {
+ ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy,
+ leaseManager,
+ InitialPositionInStream.TRIM_HORIZON,
+ cleanupLeasesOfCompletedShards);
+ }
+ }
+
+ // Real implementation of testing CheckAndCreateLeasesForNewShards with different leaseManager types.
+ private void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardImpl(
+ ExceptionThrowingLeaseManagerMethods exceptionMethod, int exceptionTime)
+ throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException,
+ IOException {
+ List<Shard> shards = constructShardListForGraphA();
+ File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1");
+ dataFile.deleteOnExit();
+ IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath());
+
+ retryCheckAndCreateLeaseForNewShards(kinesisProxy, exceptionMethod, exceptionTime);
+
+ List<KinesisClientLease> newLeases = leaseManager.listLeases();
+ Map<String, ExtendedSequenceNumber> expectedShardIdToCheckpointMap =
+ new HashMap<String, ExtendedSequenceNumber>();
+ for (int i = 0; i < 11; i++) {
+ expectedShardIdToCheckpointMap.put("shardId-" + i, ExtendedSequenceNumber.TRIM_HORIZON);
+ }
+ Assert.assertEquals(expectedShardIdToCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease1 : newLeases) {
+ ExtendedSequenceNumber expectedCheckpoint = expectedShardIdToCheckpointMap.get(lease1.getLeaseKey());
+ Assert.assertNotNull(expectedCheckpoint);
+ Assert.assertEquals(expectedCheckpoint, lease1.getCheckpoint());
+ }
+
+ KinesisClientLease closedShardLease = leaseManager.getLease("shardId-0");
+ closedShardLease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
+ leaseManager.updateLease(closedShardLease);
+ expectedShardIdToCheckpointMap.remove(closedShardLease.getLeaseKey());
+ KinesisClientLease childShardLease = leaseManager.getLease("shardId-6");
+ childShardLease.setCheckpoint(new ExtendedSequenceNumber("34290"));
+ leaseManager.updateLease(childShardLease);
+ expectedShardIdToCheckpointMap.put(childShardLease.getLeaseKey(), new ExtendedSequenceNumber("34290"));
+
+ retryCheckAndCreateLeaseForNewShards(kinesisProxy, exceptionMethod, exceptionTime);
+
+ newLeases = leaseManager.listLeases();
+ Assert.assertEquals(expectedShardIdToCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease1 : newLeases) {
+ ExtendedSequenceNumber expectedCheckpoint = expectedShardIdToCheckpointMap.get(lease1.getLeaseKey());
+ Assert.assertNotNull(expectedCheckpoint);
+ Assert.assertEquals(expectedCheckpoint, lease1.getCheckpoint());
+ }
+
+ dataFile.delete();
+ }
+
+ /**
+ * Test bootstrapShardLeases() - cleanup garbage leases.
+ *
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ * @throws DependencyException
+ * @throws IOException
+ * @throws KinesisClientLibIOException
+ */
+ @Test
+ public final void testBootstrapShardLeasesCleanupGarbage()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException,
+ KinesisClientLibIOException {
+ String garbageShardId = "shardId-garbage-001";
+ KinesisClientLease garbageLease = ShardSyncer.newKCLLease(ShardObjectHelper.newShard(garbageShardId,
+ null,
+ null,
+ ShardObjectHelper.newSequenceNumberRange("101", null)));
+ garbageLease.setCheckpoint(new ExtendedSequenceNumber("999"));
+ leaseManager.createLeaseIfNotExists(garbageLease);
+ Assert.assertEquals(garbageShardId, leaseManager.getLease(garbageShardId).getLeaseKey());
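+ // The garbage lease points at a shard that is not part of the stream, so bootstrapping should remove it.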
+ testBootstrapShardLeasesAtStartingPosition(InitialPositionInStream.LATEST);
+ Assert.assertNull(leaseManager.getLease(garbageShardId));
+ }
+
+ private void testBootstrapShardLeasesAtStartingPosition(InitialPositionInStream initialPosition)
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException,
+ KinesisClientLibIOException {
+ List<Shard> shards = new ArrayList<Shard>();
+ SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
+
+ String shardId0 = "shardId-0";
+ shards.add(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange));
+ String shardId1 = "shardId-1";
+ shards.add(ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
+ File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 10, "testBootstrap1");
+ dataFile.deleteOnExit();
+ IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath());
+
+ ShardSyncer.bootstrapShardLeases(kinesisProxy, leaseManager, initialPosition, cleanupLeasesOfCompletedShards);
+ List<KinesisClientLease> newLeases = leaseManager.listLeases();
+ Assert.assertEquals(2, newLeases.size());
+ Set<String> expectedLeaseShardIds = new HashSet<String>();
+ expectedLeaseShardIds.add(shardId0);
+ expectedLeaseShardIds.add(shardId1);
+ for (KinesisClientLease lease1 : newLeases) {
+ Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey()));
+ Assert.assertEquals(new ExtendedSequenceNumber(initialPosition.toString()), lease1.getCheckpoint());
+ }
+ dataFile.delete();
+ }
+
+ /**
+ * Test determineNewLeasesToCreate() starting at latest and at trim horizon ("beginning" of shard)
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateStartingPosition() {
+ List<Shard> shards = new ArrayList<Shard>();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+ SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
+
+ String shardId0 = "shardId-0";
+ shards.add(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange));
+
+ String shardId1 = "shardId-1";
+ shards.add(ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
+
+ Set<InitialPositionInStream> initialPositions = new HashSet<InitialPositionInStream>();
+ initialPositions.add(InitialPositionInStream.LATEST);
+ initialPositions.add(InitialPositionInStream.TRIM_HORIZON);
+
+ for (InitialPositionInStream initialPosition : initialPositions) {
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, initialPosition);
+ Assert.assertEquals(2, newLeases.size());
+ Set<String> expectedLeaseShardIds = new HashSet<String>();
+ expectedLeaseShardIds.add(shardId0);
+ expectedLeaseShardIds.add(shardId1);
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue(expectedLeaseShardIds.contains(lease.getLeaseKey()));
+ Assert.assertEquals(new ExtendedSequenceNumber(initialPosition.toString()), lease.getCheckpoint());
+ }
+ }
+ }
+
+ /**
+ * Test determineNewLeasesToCreate() - 1 closed and 1 open shard (ignore closed shard)
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateIgnoreClosedShard() {
+ List<Shard> shards = new ArrayList<Shard>();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+
+ shards.add(ShardObjectHelper.newShard("shardId-0",
+ null,
+ null,
+ ShardObjectHelper.newSequenceNumberRange("303", "404")));
+ String lastShardId = "shardId-1";
+ shards.add(ShardObjectHelper.newShard(lastShardId,
+ null,
+ null,
+ ShardObjectHelper.newSequenceNumberRange("405", null)));
+
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.LATEST);
+ Assert.assertEquals(1, newLeases.size());
+ Assert.assertEquals(lastShardId, newLeases.get(0).getLeaseKey());
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position Latest)
+ * Shard structure (each level depicts a stream segment):
+ * 0 1 2 3 4   5   - shards till epoch 102
+ * \ / \ / |   |
+ *  6   7  4   5   - shards from epoch 103 - 205
+ *   \ /   |  / \
+ *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
+ * Current leases: (3, 4, 5)
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateSplitMergeLatest1() {
+ List<Shard> shards = constructShardListForGraphA();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+
+ currentLeases.add(newLease("shardId-3"));
+ currentLeases.add(newLease("shardId-4"));
+ currentLeases.add(newLease("shardId-5"));
+
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.LATEST);
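+ // Shards 7, 8, 9 and 10 descend from the currently leased shards (3, 4, 5), so they are expected to start at
+ // TRIM_HORIZON to avoid losing data, while the non-descendant ancestors 2 and 6 start at LATEST.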
+ Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap =
+ new HashMap<String, ExtendedSequenceNumber>();
+ expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
+ expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.LATEST);
+ expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON);
+
+ Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue("Unexpected lease: " + lease,
+ expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey()));
+ Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint());
+ }
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position Latest)
+ * Shard structure (each level depicts a stream segment):
+ * 0 1 2 3 4   5   - shards till epoch 102
+ * \ / \ / |   |
+ *  6   7  4   5   - shards from epoch 103 - 205
+ *   \ /   |  / \
+ *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
+ * Current leases: (4, 5, 7)
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateSplitMergeLatest2() {
+ List<Shard> shards = constructShardListForGraphA();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+
+ currentLeases.add(newLease("shardId-4"));
+ currentLeases.add(newLease("shardId-5"));
+ currentLeases.add(newLease("shardId-7"));
+
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.LATEST);
+ Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap =
+ new HashMap<String, ExtendedSequenceNumber>();
+ expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
+
+ Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue("Unexpected lease: " + lease,
+ expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey()));
+ Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint());
+ }
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon)
+ * Shard structure (each level depicts a stream segment):
+ * 0 1 2 3 4   5   - shards till epoch 102
+ * \ / \ / |   |
+ *  6   7  4   5   - shards from epoch 103 - 205
+ *   \ /   |  / \
+ *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
+ * Current leases: (3, 4, 5)
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateSplitMergeHorizon1() {
+ List<Shard> shards = constructShardListForGraphA();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+
+ currentLeases.add(newLease("shardId-3"));
+ currentLeases.add(newLease("shardId-4"));
+ currentLeases.add(newLease("shardId-5"));
+
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.TRIM_HORIZON);
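+ // With TRIM_HORIZON as the initial position, every newly discovered shard, including the root shards 0 and 1, is expected to be checkpointed at TRIM_HORIZON.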
+ Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap =
+ new HashMap<String, ExtendedSequenceNumber>();
+ expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
+
+ Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue("Unexpected lease: " + lease,
+ expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey()));
+ Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint());
+ }
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon)
+ * Shard structure (each level depicts a stream segment):
+ * 0 1 2 3 4   5   - shards till epoch 102
+ * \ / \ / |   |
+ *  6   7  4   5   - shards from epoch 103 - 205
+ *   \ /   |  / \
+ *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
+ * Current leases: (4, 5, 7)
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateSplitMergeHorizon2() {
+ List<Shard> shards = constructShardListForGraphA();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+
+ currentLeases.add(newLease("shardId-4"));
+ currentLeases.add(newLease("shardId-5"));
+ currentLeases.add(newLease("shardId-7"));
+
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.TRIM_HORIZON);
+ Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap =
+ new HashMap<String, ExtendedSequenceNumber>();
+ expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
+ expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
+
+ Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue("Unexpected lease: " + lease,
+ expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey()));
+ Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint());
+ }
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon)
+ * For shard graph B (see the construct method doc for structure).
+ *
+ * Current leases: empty set
+ */
+ @Test
+ public final void testDetermineNewLeasesToCreateGraphBNoInitialLeasesTrim() {
+ List<Shard> shards = constructShardListForGraphB();
+ List<KinesisClientLease> currentLeases = new ArrayList<KinesisClientLease>();
+ List<KinesisClientLease> newLeases =
+ ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, InitialPositionInStream.TRIM_HORIZON);
+ Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap =
+ new HashMap<String, ExtendedSequenceNumber>();
+ for (int i = 0; i < 11; i++) {
+ String expectedShardId = "shardId-" + i;
+ expectedShardIdCheckpointMap.put(expectedShardId, ExtendedSequenceNumber.TRIM_HORIZON);
+ }
+
+ Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size());
+ for (KinesisClientLease lease : newLeases) {
+ Assert.assertTrue("Unexpected lease: " + lease,
+ expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey()));
+ Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint());
+ }
+ }
+
+ /*
+ * Helper method to construct a shard list for graph A. Graph A is defined below.
+ * Shard structure (y-axis is epochs):
+ * 0 1 2 3 4   5   - shards till epoch 102
+ * \ / \ / |   |
+ *  6   7  4   5   - shards from epoch 103 - 205
+ *   \ /   |  / \
+ *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
+ */
+ List<Shard> constructShardListForGraphA() {
+ List<Shard> shards = new ArrayList<Shard>();
+
+ SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("11", "102");
+ SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("11", null);
+ SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("11", "205");
+ SequenceNumberRange range3 = ShardObjectHelper.newSequenceNumberRange("103", "205");
+ SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null);
+
+ HashKeyRange hashRange0 = ShardObjectHelper.newHashKeyRange("0", "99");
+ HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("100", "199");
+ HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("200", "299");
+ HashKeyRange hashRange3 = ShardObjectHelper.newHashKeyRange("300", "399");
+ HashKeyRange hashRange4 = ShardObjectHelper.newHashKeyRange("400", "499");
+ HashKeyRange hashRange5 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY);
+ HashKeyRange hashRange6 = ShardObjectHelper.newHashKeyRange("0", "199");
+ HashKeyRange hashRange7 = ShardObjectHelper.newHashKeyRange("200", "399");
+ HashKeyRange hashRange8 = ShardObjectHelper.newHashKeyRange("0", "399");
+ HashKeyRange hashRange9 = ShardObjectHelper.newHashKeyRange("500", "799");
+ HashKeyRange hashRange10 = ShardObjectHelper.newHashKeyRange("800", ShardObjectHelper.MAX_HASH_KEY);
+
+ shards.add(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0));
+ shards.add(ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1));
+ shards.add(ShardObjectHelper.newShard("shardId-2", null, null, range0, hashRange2));
+ shards.add(ShardObjectHelper.newShard("shardId-3", null, null, range0, hashRange3));
+ shards.add(ShardObjectHelper.newShard("shardId-4", null, null, range1, hashRange4));
+ shards.add(ShardObjectHelper.newShard("shardId-5", null, null, range2, hashRange5));
+
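+ // shardId-6 and shardId-7 are the merge results of (shardId-0, shardId-1) and (shardId-2, shardId-3) respectively.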
+ shards.add(ShardObjectHelper.newShard("shardId-6", "shardId-0", "shardId-1", range3, hashRange6));
+ shards.add(ShardObjectHelper.newShard("shardId-7", "shardId-2", "shardId-3", range3, hashRange7));
+
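+ // shardId-8 merges shardId-6 and shardId-7; shardId-9 and shardId-10 are the split children of shardId-5.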
+ shards.add(ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range4, hashRange8));
+ shards.add(ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4, hashRange9));
+ shards.add(ShardObjectHelper.newShard("shardId-10", null, "shardId-5", range4, hashRange10));
+
+ return shards;
+ }
+
+ /*
+ * Helper method to construct a shard list for graph B. Graph B is defined below.
+ * Shard structure (x-axis is epochs):
+ * 0   3   6   9
+ *  \ / \ / \ /
+ *   2   5   8
+ *  / \ / \ / \
+ * 1   4   7   10
+ */
+ List<Shard> constructShardListForGraphB() {
+ List<Shard> shards = new ArrayList<Shard>();
+
+ SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("1000", "1049");
+ SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("1050", "1099");
+ SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("1100", "1149");
+ SequenceNumberRange range3 = ShardObjectHelper.newSequenceNumberRange("1150", "1199");
+ SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("1200", "1249");
+ SequenceNumberRange range5 = ShardObjectHelper.newSequenceNumberRange("1250", "1299");
+ SequenceNumberRange range6 = ShardObjectHelper.newSequenceNumberRange("1300", null);
+
+ HashKeyRange hashRange0 = ShardObjectHelper.newHashKeyRange("0", "499");
+ HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY);
+ HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("0", ShardObjectHelper.MAX_HASH_KEY);
+
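+ // Graph B alternates merges into a single full-range shard (2, 5, 8) with splits back into two shards (3/4, 6/7, 9/10).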
+ shards.add(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0));
+ shards.add(ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1));
+ shards.add(ShardObjectHelper.newShard("shardId-2", "shardId-0", "shardId-1", range1, hashRange2));
+ shards.add(ShardObjectHelper.newShard("shardId-3", "shardId-2", null, range2, hashRange0));
+ shards.add(ShardObjectHelper.newShard("shardId-4", "shardId-2", null, range2, hashRange1));
+ shards.add(ShardObjectHelper.newShard("shardId-5", "shardId-3", "shardId-4", range3, hashRange2));
+ shards.add(ShardObjectHelper.newShard("shardId-6", "shardId-5", null, range4, hashRange0));
+ shards.add(ShardObjectHelper.newShard("shardId-7", "shardId-5", null, range4, hashRange1));
+ shards.add(ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range5, hashRange2));
+ shards.add(ShardObjectHelper.newShard("shardId-9", "shardId-8", null, range6, hashRange0));
+ shards.add(ShardObjectHelper.newShard("shardId-10", null, "shardId-8", range6, hashRange1));
+
+ return shards;
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors when shardId is null
+ */
+ @Test
+ public final void testCheckIfDescendantAndAddNewLeasesForAncestorsNullShardId() {
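+ // The memoization context records, per shard id, whether that shard has already been classified as a descendant of a currently leased shard.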
+ Map<String, Boolean> memoizationContext = new HashMap<>();
+ Assert.assertFalse(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(null,
+ latestPosition,
+ null,
+ null,
+ null,
+ memoizationContext));
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors when shard has been trimmed
+ */
+ @Test
+ public final void testCheckIfDescendantAndAddNewLeasesForAncestorsTrimmedShard() {
+ String shardId = "shardId-trimmed";
+ Map<String, Shard> kinesisShards = new HashMap<String, Shard>();
+ Map<String, Boolean> memoizationContext = new HashMap<>();
+ Assert.assertFalse(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId,
+ latestPosition,
+ null,
+ kinesisShards,
+ null,
+ memoizationContext));
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors when there is a current lease for the shard
+ */
+ @Test
+ public final void testCheckIfDescendantAndAddNewLeasesForAncestorsForShardWithCurrentLease() {
+ String shardId = "shardId-current";
+ Map<String, Shard> kinesisShards = new HashMap<String, Shard>();
+ kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, null, null, null));
+ Set<String> shardIdsOfCurrentLeases = new HashSet<String>();
+ shardIdsOfCurrentLeases.add(shardId);
+ Map<String, KinesisClientLease> newLeaseMap = new HashMap<String, KinesisClientLease>();
+ Map<String, Boolean> memoizationContext = new HashMap<>();
+ Assert.assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId,
+ latestPosition,
+ shardIdsOfCurrentLeases,
+ kinesisShards,
+ newLeaseMap,
+ memoizationContext));
+ Assert.assertTrue(newLeaseMap.isEmpty());
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, two ancestors, not descendant
+ */
+ @Test
+ public final void testCheckIfDescendantAndAddNewLeasesForAncestors2P2ANotDescendant() {
+ Set<String> shardIdsOfCurrentLeases = new HashSet<String>();
+ Map<String, KinesisClientLease> newLeaseMap = new HashMap<String, KinesisClientLease>();
+ Map<String, Shard> kinesisShards = new HashMap<String, Shard>();
+
+ String parentShardId = "shardId-parent";
+ kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+
+ String adjacentParentShardId = "shardId-adjacentParent";
+ kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
+
+ String shardId = "shardId-9-1";
+ kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null));
+
+ Map memoizationContext = new HashMap<>();
+ Assert.assertFalse(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId,
+ latestPosition,
+ shardIdsOfCurrentLeases,
+ kinesisShards,
+ newLeaseMap,
+ memoizationContext));
+ Assert.assertTrue(newLeaseMap.isEmpty());
+ }
+
+ /**
+ * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent.
+ */
+ @Test
+ public final void testCheckIfDescendantAndAddNewLeasesForAncestors2P2A1PDescendant() {
+ Set<String> shardIdsOfCurrentLeases = new HashSet<String>();
+ Map<String, KinesisClientLease> newLeaseMap = new HashMap<String, KinesisClientLease>();
+ Map<String, Shard> kinesisShards = new HashMap<String, Shard>();
+
+ String parentShardId = "shardId-parent";
+ kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+ shardIdsOfCurrentLeases.add(parentShardId);
+
+ String adjacentParentShardId = "shardId-adjacentParent";
+ kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
+
+ String shardId = "shardId-9-1";
+ Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null);
+ kinesisShards.put(shardId, shard);
+
+ Map memoizationContext = new HashMap<>();
+ Assert.assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId,
+ latestPosition,
+ shardIdsOfCurrentLeases,
+ kinesisShards,
+ newLeaseMap,
+ memoizationContext));
+ Assert.assertEquals(1, newLeaseMap.size());
+ Assert.assertTrue(newLeaseMap.containsKey(adjacentParentShardId));
+ KinesisClientLease adjacentParentLease = newLeaseMap.get(adjacentParentShardId);
+ Assert.assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.getCheckpoint());
+ }
+
+ /**
+ * Test getParentShardIds() when the shard has no parents.
+ */
+ @Test
+ public final void testGetParentShardIdsNoParents() {
+ Shard shard = new Shard();
+ Assert.assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty());
+ }
+
+ /**
+ * Test getParentShardIds() when the shard's parents have been trimmed (absent from the shard map).
+ */
+ @Test
+ public final void testGetParentShardIdsTrimmedParents() {
+ Map<String, Shard> shardMap = new HashMap<String, Shard>();
+ Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null);
+ Assert.assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty());
+ }
+
+ /**
+ * Test getParentShardIds() when the shard has a single parent.
+ */
+ @Test
+ public final void testGetParentShardIdsSingleParent() {
+ Map<String, Shard> shardMap = new HashMap<String, Shard>();
+
+ String parentShardId = "shardId-parent";
+ shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+
+ Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null);
+ Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertEquals(1, parentShardIds.size());
+ Assert.assertTrue(parentShardIds.contains(parentShardId));
+
+ shard.setParentShardId(null);
+ parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertTrue(parentShardIds.isEmpty());
+
+ shard.setAdjacentParentShardId(parentShardId);
+ parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertEquals(1, parentShardIds.size());
+ Assert.assertTrue(parentShardIds.contains(parentShardId));
+ }
+
+ /**
+ * Test getParentShardIds() when the shard has two parents, one is trimmed.
+ */
+ @Test
+ public final void testGetParentShardIdsOneTrimmedParent() {
+ Map<String, Shard> shardMap = new HashMap<String, Shard>();
+
+ String parentShardId = "shardId-parent";
+ Shard parent = ShardObjectHelper.newShard(parentShardId, null, null, null);
+
+ String adjacentParentShardId = "shardId-adjacentParent";
+ Shard adjacentParent = ShardObjectHelper.newShard(adjacentParentShardId, null, null, null);
+
+ Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null);
+
+ shardMap.put(parentShardId, parent);
+ Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertEquals(1, parentShardIds.size());
+ Assert.assertTrue(parentShardIds.contains(parentShardId));
+
+ shardMap.remove(parentShardId);
+ parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertTrue(parentShardIds.isEmpty());
+
+ shardMap.put(adjacentParentShardId, adjacentParent);
+ parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertEquals(1, parentShardIds.size());
+ Assert.assertTrue(parentShardIds.contains(adjacentParentShardId));
+ }
+
+ /**
+ * Test getParentShardIds() when the shard has two parents.
+ */
+ @Test
+ public final void testGetParentShardIdsTwoParents() {
+ Map<String, Shard> shardMap = new HashMap<String, Shard>();
+
+ String parentShardId = "shardId-parent";
+ shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+
+ String adjacentParentShardId = "shardId-adjacentParent";
+ shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
+
+ Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null);
+
+ Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+ Assert.assertEquals(2, parentShardIds.size());
+ Assert.assertTrue(parentShardIds.contains(parentShardId));
+ Assert.assertTrue(parentShardIds.contains(adjacentParentShardId));
+ }
+
+ /**
+ * Test newKCLLease() for a shard that has both a parent and an adjacent parent.
+ */
+ @Test
+ public final void testNewLease() {
+ Shard shard = new Shard();
+ String shardId = "shardId-95";
+ shard.setShardId(shardId);
+ String parentShardId = "shardId-parent";
+ String adjacentParentShardId = "shardId-adjacentParent";
+ shard.setParentShardId(parentShardId);
+ shard.setAdjacentParentShardId(adjacentParentShardId);
+
+ KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
+ Assert.assertEquals(shardId, lease.getLeaseKey());
+ Assert.assertNull(lease.getCheckpoint());
+ Set<String> parentIds = lease.getParentShardIds();
+ Assert.assertEquals(2, parentIds.size());
+ Assert.assertTrue(parentIds.contains(parentShardId));
+ Assert.assertTrue(parentIds.contains(adjacentParentShardId));
+ }
+
+ /**
+ * Test method for constructShardIdToShardMap.
+ */
+ @Test
+ public final void testConstructShardIdToShardMap() {
+ List<Shard> shards = new ArrayList<Shard>(2);
+ shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null));
+ shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null));
+
+ Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+ Assert.assertEquals(shards.size(), shardIdToShardMap.size());
+ for (Shard shard : shards) {
+ Assert.assertSame(shard, shardIdToShardMap.get(shard.getShardId()));
+ }
+ }
+
+ /**
+ * Test getOpenShards() - no shards are open.
+ */
+ @Test
+ public final void testGetOpenShardsNoneOpen() {
+ List<Shard> shards = new ArrayList<Shard>();
+ shards.add(ShardObjectHelper.newShard("shardId-9384",
+ null,
+ null,
+ ShardObjectHelper.newSequenceNumberRange("123", "345")));
+ Assert.assertTrue(ShardSyncer.getOpenShards(shards).isEmpty());
+ }
+
+ /**
+ * Test getOpenShards() - test null and max end sequence number.
+ */
+ @Test
+ public final void testGetOpenShardsNullAndMaxEndSeqNum() {
+ List<Shard> shards = new ArrayList<Shard>();
+ String shardId = "shardId-2738";
+ SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null);
+ shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange));
+
+ // Verify shard is considered open when it has a null end sequence number
+ List<Shard> openShards = ShardSyncer.getOpenShards(shards);
+ Assert.assertEquals(1, openShards.size());
+ Assert.assertEquals(shardId, openShards.get(0).getShardId());
+
+ // Close shard before testing for max sequence number
+ sequenceNumberRange.setEndingSequenceNumber("1000");
+ openShards = ShardSyncer.getOpenShards(shards);
+ Assert.assertTrue(openShards.isEmpty());
+
+ // Verify shard is considered closed when the end sequence number is set to max allowed sequence number
+ sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString());
+ openShards = ShardSyncer.getOpenShards(shards);
+ Assert.assertEquals(0, openShards.size());
+ }
+
+ /**
+ * Test isCandidateForCleanup
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test
+ public final void testIsCandidateForCleanup() throws KinesisClientLibIOException {
+ String parentShardId = "shardId-0000";
+ String adjacentParentShardId = "shardId-0001";
+ String shardId = "shardId-0002";
+ KinesisClientLease lease = newLease(shardId);
+ List parentShardIds = new ArrayList<>();
+ parentShardIds.add(parentShardId);
+ parentShardIds.add(adjacentParentShardId);
+ lease.setParentShardIds(parentShardIds);
+ Set currentKinesisShardIds = new HashSet<>();
+
+ currentKinesisShardIds.add(shardId);
+ Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+
+ currentKinesisShardIds.clear();
+ Assert.assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+
+ currentKinesisShardIds.add(parentShardId);
+ // Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+
+ currentKinesisShardIds.clear();
+ Assert.assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+
+ currentKinesisShardIds.add(adjacentParentShardId);
+ // Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+ currentKinesisShardIds.add(parentShardId);
+ // Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+ currentKinesisShardIds.add(shardId);
+ Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+ }
+
+ /**
+ * Test isCandidateForCleanup
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testIsCandidateForCleanupParentExists() throws KinesisClientLibIOException {
+ String parentShardId = "shardId-0000";
+ String adjacentParentShardId = "shardId-0001";
+ String shardId = "shardId-0002";
+ KinesisClientLease lease = newLease(shardId);
+ List parentShardIds = new ArrayList<>();
+ parentShardIds.add(parentShardId);
+ parentShardIds.add(adjacentParentShardId);
+ lease.setParentShardIds(parentShardIds);
+ Set currentKinesisShardIds = new HashSet<>();
+
+ currentKinesisShardIds.add(parentShardId);
+ Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+ }
+
+ /**
+ * Test isCandidateForCleanup
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testIsCandidateForCleanupAdjacentParentExists() throws KinesisClientLibIOException {
+ String parentShardId = "shardId-0000";
+ String adjacentParentShardId = "shardId-0001";
+ String shardId = "shardId-0002";
+ KinesisClientLease lease = newLease(shardId);
+ List parentShardIds = new ArrayList<>();
+ parentShardIds.add(parentShardId);
+ parentShardIds.add(adjacentParentShardId);
+ lease.setParentShardIds(parentShardIds);
+ Set currentKinesisShardIds = new HashSet<>();
+
+ currentKinesisShardIds.add(adjacentParentShardId);
+ Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+ }
+
+ /**
+ * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun).
+ *
+ * @throws DependencyException
+ * @throws InvalidStateException
+ * @throws ProvisionedThroughputException
+ */
+ @Test
+ public final void testCleanupLeaseForClosedShard()
+ throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+ String closedShardId = "shardId-2";
+ KinesisClientLease leaseForClosedShard = newLease(closedShardId);
+ leaseForClosedShard.setCheckpoint(new ExtendedSequenceNumber("1234"));
+ leaseManager.createLeaseIfNotExists(leaseForClosedShard);
+
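+ // Build two child leases (shardId-5 and shardId-7) that list the closed shard as their parent; their checkpoints determine whether the parent lease may be cleaned up.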
+ Set<String> childShardIds = new HashSet<>();
+ List<KinesisClientLease> trackedLeases = new ArrayList<>();
+ Set<String> parentShardIds = new HashSet<>();
+ parentShardIds.add(closedShardId);
+ String childShardId1 = "shardId-5";
+ KinesisClientLease childLease1 = newLease(childShardId1);
+ childLease1.setParentShardIds(parentShardIds);
+ childLease1.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
+ String childShardId2 = "shardId-7";
+ KinesisClientLease childLease2 = newLease(childShardId2);
+ childLease2.setParentShardIds(parentShardIds);
+ childLease2.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
+ Map<String, KinesisClientLease> trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+
+ // empty list of leases
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNotNull(leaseManager.getLease(closedShardId));
+
+ // closed shard has not been fully processed yet (checkpoint != SHARD_END)
+ trackedLeases.add(leaseForClosedShard);
+ trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNotNull(leaseManager.getLease(closedShardId));
+
+ // closed shard has now been fully processed (checkpoint == SHARD_END)
+ leaseForClosedShard.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
+ leaseManager.updateLease(leaseForClosedShard);
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNull(leaseManager.getLease(closedShardId));
+
+ // lease for only one child exists
+ childShardIds.add(childShardId1);
+ childShardIds.add(childShardId2);
+ leaseManager.createLeaseIfNotExists(leaseForClosedShard);
+ leaseManager.createLeaseIfNotExists(childLease1);
+ trackedLeases.add(childLease1);
+ trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNotNull(leaseManager.getLease(closedShardId));
+
+ // leases for both children exist, but they are both at TRIM_HORIZON
+ leaseManager.createLeaseIfNotExists(childLease2);
+ trackedLeases.add(childLease2);
+ trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNotNull(leaseManager.getLease(closedShardId));
+
+ // leases for both children exist, but one is still at TRIM_HORIZON
+ childLease1.setCheckpoint(new ExtendedSequenceNumber("34890"));
+ leaseManager.updateLease(childLease1);
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNotNull(leaseManager.getLease(closedShardId));
+
+ // leases for both children exist and neither is at TRIM_HORIZON
+ childLease2.setCheckpoint(new ExtendedSequenceNumber("43789"));
+ leaseManager.updateLease(childLease2);
+ ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager);
+ Assert.assertNull(leaseManager.getLease(closedShardId));
+ }
+
+ /**
+ * Test we can handle trimmed Kinesis shards (absent from the shard list), and valid closed shards.
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test
+ public final void testAssertShardCoveredOrAbsentTestAbsentAndValid() throws KinesisClientLibIOException {
+ List<Shard> shards = new ArrayList<>();
+ String expectedClosedShardId = "shardId-34098";
+ SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
+ HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+ Shard closedShard =
+ ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
+ SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300");
+ Shard child1 =
+ ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null, childSequenceNumberRange);
+ Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+ Map<String, Set<String>> shardIdToChildShardIdsMap =
+ ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+ Set<String> closedShardIds = new HashSet<>();
+ closedShardIds.add(expectedClosedShardId);
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+
+ // test for case where shard has been trimmed (absent from list)
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+
+ // Populate shards.
+ shards.add(closedShard);
+ shards.add(child1);
+ shardIdToShardMap.put(expectedClosedShardId, closedShard);
+ shardIdToShardMap.put(child1.getShardId(), child1);
+ shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+
+ // test degenerate split/merge
+ child1.setHashKeyRange(hashKeyRange);
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+
+ // test merge
+ child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985"));
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+ child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25"));
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+
+ // test split
+ HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15");
+ HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25");
+ child1.setHashKeyRange(childHashKeyRange1);
+ Shard child2 = ShardObjectHelper.newShard("shardId-43789",
+ null,
+ expectedClosedShardId,
+ childSequenceNumberRange,
+ childHashKeyRange2);
+ shards.add(child2);
+ shardIdToShardMap.put(child2.getShardId(), child2);
+ shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+ }
+
+ /**
+ * Test we throw an exception if the shard is open
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testAssertShardCoveredOrAbsentTestOpen() throws KinesisClientLibIOException {
+ List<Shard> shards = new ArrayList<>();
+ String expectedClosedShardId = "shardId-34098";
+ SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", null);
+ HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+ Shard openShard =
+ ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
+ shards.add(openShard);
+ Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+ Map<String, Set<String>> shardIdToChildShardIdsMap =
+ ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+ Set<String> closedShardIds = new HashSet<>();
+ closedShardIds.add(expectedClosedShardId);
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+ }
+
+ /**
+ * Test we throw an exception if there are no children
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testAssertShardCoveredOrAbsentTestNoChildren() throws KinesisClientLibIOException {
+ List<Shard> shards = new ArrayList<>();
+ String expectedClosedShardId = "shardId-34098";
+ SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
+ HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+ Shard closedShard =
+ ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
+ shards.add(closedShard);
+ Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+ Map<String, Set<String>> shardIdToChildShardIdsMap =
+ ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+ Set<String> closedShardIds = new HashSet<>();
+ closedShardIds.add(expectedClosedShardId);
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+ }
+
+ /**
+ * Test we throw an exception if children don't cover hash key range (min of children > min of parent)
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testAssertShardCoveredOrAbsentTestIncompleteSplitMin() throws KinesisClientLibIOException {
+ HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+ HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15");
+ HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25");
+ testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2);
+ }
+
+ /**
+ * Test we throw an exception if children don't cover hash key range (max of children < max of parent)
+ *
+ * @throws KinesisClientLibIOException
+ */
+ @Test(expected = KinesisClientLibIOException.class)
+ public final void testAssertShardCoveredOrAbsentTestIncompleteSplitMax() throws KinesisClientLibIOException {
+ HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+ HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15");
+ HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23");
+ testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2);
+ }
+
+ private void testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange,
+ HashKeyRange child1HashKeyRange,
+ HashKeyRange child2HashKeyRange)
+ throws KinesisClientLibIOException {
+ List<Shard> shards = new ArrayList<>();
+ String expectedClosedShardId = "shardId-34098";
+ SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
+ Shard closedShard =
+ ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, parentHashKeyRange);
+ shards.add(closedShard);
+
+ SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300");
+ Shard child1 = ShardObjectHelper.newShard("shardId-43789",
+ null,
+ expectedClosedShardId,
+ childSequenceNumberRange,
+ child1HashKeyRange);
+ shards.add(child1);
+ Shard child2 = ShardObjectHelper.newShard("shardId-43789",
+ null,
+ expectedClosedShardId,
+ childSequenceNumberRange,
+ child2HashKeyRange);
+ shards.add(child2);
+
+ Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+ Map<String, Set<String>> shardIdToChildShardIdsMap =
+ ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+ Set<String> closedShardIds = new HashSet<>();
+ closedShardIds.add(expectedClosedShardId);
+ ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
+ }
+
+ /**
+ * Helper method.
+ *
+ * @param shardId
+ * @return
+ */
+ private KinesisClientLease newLease(String shardId) {
+ KinesisClientLease lease = new KinesisClientLease();
+ lease.setLeaseKey(shardId);
+
+ return lease;
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTaskTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTaskTest.java
new file mode 100644
index 00000000..6b77f818
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTaskTest.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import junit.framework.Assert;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
+
+/**
+ * Unit tests of ShutdownTask.
+ */
+public class ShutdownTaskTest {
+ private static final long TASK_BACKOFF_TIME_MILLIS = 1L;
+ Set<String> defaultParentShardIds = new HashSet<>();
+ String defaultConcurrencyToken = "testToken4398";
+ String defaultShardId = "shardId-0000397840";
+ ShardInfo defaultShardInfo = new ShardInfo(defaultShardId,
+ defaultConcurrencyToken,
+ defaultParentShardIds);
+ IRecordProcessor defaultRecordProcessor = new TestStreamlet();
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @Before
+ public void setUp() throws Exception {
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ /**
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownTask#call()}.
+ */
+ @Test
+ public final void testCallWhenApplicationDoesNotCheckpoint() {
+ RecordProcessorCheckpointer checkpointer = mock(RecordProcessorCheckpointer.class);
+ when(checkpointer.getLastCheckpointValue()).thenReturn(new ExtendedSequenceNumber("3298"));
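+ // The last checkpoint is an ordinary sequence number rather than SHARD_END, so the TERMINATE shutdown is expected to fail with an IllegalArgumentException.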
+ IKinesisProxy kinesisProxy = mock(IKinesisProxy.class);
+ ILeaseManager<KinesisClientLease> leaseManager = mock(KinesisClientLeaseManager.class);
+ boolean cleanupLeasesOfCompletedShards = false;
+ ShutdownTask task =
+ new ShutdownTask(defaultShardInfo,
+ defaultRecordProcessor,
+ checkpointer,
+ ShutdownReason.TERMINATE,
+ kinesisProxy,
+ InitialPositionInStream.TRIM_HORIZON,
+ cleanupLeasesOfCompletedShards,
+ leaseManager,
+ TASK_BACKOFF_TIME_MILLIS);
+ TaskResult result = task.call();
+ Assert.assertNotNull(result.getException());
+ Assert.assertTrue(result.getException() instanceof IllegalArgumentException);
+ }
+
+ /**
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownTask#call()}.
+ */
+ @Test
+ public final void testCallWhenSyncingShardsThrows() {
+ RecordProcessorCheckpointer checkpointer = mock(RecordProcessorCheckpointer.class);
+ when(checkpointer.getLastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END);
+ IKinesisProxy kinesisProxy = mock(IKinesisProxy.class);
+ when(kinesisProxy.getShardList()).thenReturn(null);
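+ // A null shard list makes the shard sync during shutdown fail, which should surface as a KinesisClientLibIOException in the task result.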
+ ILeaseManager<KinesisClientLease> leaseManager = mock(KinesisClientLeaseManager.class);
+ boolean cleanupLeasesOfCompletedShards = false;
+ ShutdownTask task =
+ new ShutdownTask(defaultShardInfo,
+ defaultRecordProcessor,
+ checkpointer,
+ ShutdownReason.TERMINATE,
+ kinesisProxy,
+ InitialPositionInStream.TRIM_HORIZON,
+ cleanupLeasesOfCompletedShards,
+ leaseManager,
+ TASK_BACKOFF_TIME_MILLIS);
+ TaskResult result = task.call();
+ Assert.assertNotNull(result.getException());
+ Assert.assertTrue(result.getException() instanceof KinesisClientLibIOException);
+ }
+
+ /**
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownTask#getTaskType()}.
+ */
+ @Test
+ public final void testGetTaskType() {
+ ShutdownTask task = new ShutdownTask(null, null, null, null, null, null, false, null, 0);
+ Assert.assertEquals(TaskType.SHUTDOWN, task.getTaskType());
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamlet.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamlet.java
new file mode 100644
index 00000000..d9391e8a
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamlet.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Semaphore;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
+import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
+
+/**
+ * Streamlet that tracks records it's seen - useful for testing.
+ */
+class TestStreamlet implements IRecordProcessor {
+
+ private static final Log LOG = LogFactory.getLog(TestStreamlet.class);
+
+ private List<Record> records = new ArrayList<Record>();
+
+ private Set<String> processedSeqNums = new HashSet<String>(); // used for deduping
+
+ private Semaphore sem; // used to allow test cases to wait for all records to be processed
+
+ private String shardId;
+
+ // record the last shutdown reason we were called with.
+ private ShutdownReason shutdownReason;
+ private ShardSequenceVerifier shardSequenceVerifier;
+ private long numProcessRecordsCallsWithEmptyRecordList;
+
+ public TestStreamlet() {
+
+ }
+
+ public TestStreamlet(Semaphore sem, ShardSequenceVerifier shardSequenceVerifier) {
+ this();
+ this.sem = sem;
+ this.shardSequenceVerifier = shardSequenceVerifier;
+ }
+
+ public List<Record> getProcessedRecords() {
+ return records;
+ }
+
+ @Override
+ public void initialize(InitializationInput input) {
+ shardId = input.getShardId();
+ if (shardSequenceVerifier != null) {
+ shardSequenceVerifier.registerInitialization(shardId);
+ }
+ }
+
+ @Override
+ public void processRecords(ProcessRecordsInput input) {
+ List<Record> dataRecords = input.getRecords();
+ IRecordProcessorCheckpointer checkpointer = input.getCheckpointer();
+ if ((dataRecords != null) && (!dataRecords.isEmpty())) {
+ for (Record record : dataRecords) {
+ LOG.debug("Processing record: " + record);
+ String seqNum = record.getSequenceNumber();
+ if (!processedSeqNums.contains(seqNum)) {
+ records.add(record);
+ processedSeqNums.add(seqNum);
+ }
+ }
+ }
+ if (dataRecords.isEmpty()) {
+ numProcessRecordsCallsWithEmptyRecordList++;
+ }
+ try {
+ checkpointer.checkpoint();
+ } catch (ThrottlingException | ShutdownException
+ | KinesisClientLibDependencyException | InvalidStateException e) {
+ // Continue processing records and checkpoint next time if we get a transient error.
+ // Don't checkpoint if the processor has been shutdown.
+ LOG.debug("Caught exception while checkpointing: ", e);
+ }
+
+ if (sem != null) {
+ sem.release(dataRecords.size());
+ }
+ }
+
+ @Override
+ public void shutdown(ShutdownInput input) {
+ ShutdownReason reason = input.getShutdownReason();
+ IRecordProcessorCheckpointer checkpointer = input.getCheckpointer();
+ if (shardSequenceVerifier != null) {
+ shardSequenceVerifier.registerShutdown(shardId, reason);
+ }
+ shutdownReason = reason;
+ if (reason.equals(ShutdownReason.TERMINATE)) {
+ try {
+ checkpointer.checkpoint();
+ } catch (KinesisClientLibNonRetryableException e) {
+ LOG.error("Caught exception when checkpointing while shutdown.", e);
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ /**
+ * @return the shardId
+ */
+ String getShardId() {
+ return shardId;
+ }
+
+ /**
+ * @return the shutdownReason
+ */
+ ShutdownReason getShutdownReason() {
+ return shutdownReason;
+ }
+
+ /**
+ * @return the numProcessRecordsCallsWithEmptyRecordList
+ */
+ long getNumProcessRecordsCallsWithEmptyRecordList() {
+ return numProcessRecordsCallsWithEmptyRecordList;
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamletFactory.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamletFactory.java
new file mode 100644
index 00000000..3446f52d
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamletFactory.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Semaphore;
+
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
+
+/**
+ * Factory for TestStreamlet record processors.
+ */
+class TestStreamletFactory implements IRecordProcessorFactory {
+
+ // Will be passed to the TestStreamlet. Can be used to check if all records have been processed.
+ private Semaphore semaphore;
+ private ShardSequenceVerifier shardSequenceVerifier;
+ List<TestStreamlet> testStreamlets = new ArrayList<>();
+
+ /**
+ * Constructor.
+ */
+ TestStreamletFactory(Semaphore semaphore, ShardSequenceVerifier shardSequenceVerifier) {
+ this.semaphore = semaphore;
+ this.shardSequenceVerifier = shardSequenceVerifier;
+ }
+
+ @Override
+ public synchronized IRecordProcessor createProcessor() {
+ TestStreamlet processor = new TestStreamlet(semaphore, shardSequenceVerifier);
+ testStreamlets.add(processor);
+ return processor;
+ }
+
+ Semaphore getSemaphore() {
+ return semaphore;
+ }
+
+ ShardSequenceVerifier getShardSequenceVerifier() {
+ return shardSequenceVerifier;
+ }
+
+ /**
+ * @return the testStreamlets
+ */
+ List<TestStreamlet> getTestStreamlets() {
+ return testStreamlets;
+ }
+
+}
diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/WorkerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/WorkerTest.java
new file mode 100644
index 00000000..e5e21735
--- /dev/null
+++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/WorkerTest.java
@@ -0,0 +1,924 @@
+/*
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.lang.Thread.State;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
+import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
+import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerCWMetricsFactory;
+import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerThreadPoolExecutor;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
+import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;
+import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
+import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;
+import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
+import com.amazonaws.services.kinesis.leases.impl.LeaseManager;
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
+import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory;
+import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
+import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
+import com.amazonaws.services.kinesis.model.HashKeyRange;
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.SequenceNumberRange;
+import com.amazonaws.services.kinesis.model.Shard;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+
+/**
+ * Unit tests of Worker.
+ */
+public class WorkerTest {
+
+ private static final Log LOG = LogFactory.getLog(WorkerTest.class);
+
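+ // Fail any individual test that runs longer than 30 seconds.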
+ @Rule
+ public Timeout timeout = new Timeout((int)TimeUnit.SECONDS.toMillis(30));
+
+ private final NullMetricsFactory nullMetricsFactory = new NullMetricsFactory();
+ private final long taskBackoffTimeMillis = 1L;
+ private final long failoverTimeMillis = 5L;
+ private final boolean callProcessRecordsForEmptyRecordList = false;
+ private final long parentShardPollIntervalMillis = 5L;
+ private final long shardSyncIntervalMillis = 5L;
+ private final boolean cleanupLeasesUponShardCompletion = true;
+ // We don't want any of these tests to run checkpoint validation
+ private final boolean skipCheckpointValidationValue = false;
+ private final InitialPositionInStream initialPositionInStream = InitialPositionInStream.LATEST;
+
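+ // A v1 record processor factory whose processors checkpoint on every processRecords call
+ // and again on TERMINATE shutdown.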
+ // CHECKSTYLE:IGNORE AnonInnerLengthCheck FOR NEXT 50 LINES
+ private static final com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY =
+ new com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory() {
+
+ @Override
+ public com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor createProcessor() {
+ return new com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor() {
+
+ @Override
+ public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
+ if (reason == ShutdownReason.TERMINATE) {
+ try {
+ checkpointer.checkpoint();
+ } catch (KinesisClientLibNonRetryableException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ @Override
+ public void processRecords(List<Record> dataRecords, IRecordProcessorCheckpointer checkpointer) {
+ try {
+ checkpointer.checkpoint();
+ } catch (KinesisClientLibNonRetryableException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void initialize(String shardId) {
+ }
+ };
+ }
+ };
+
+ private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 =
+ new V1ToV2RecordProcessorFactoryAdapter(SAMPLE_RECORD_PROCESSOR_FACTORY);
+
+ /**
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#getApplicationName()}.
+ */
+ @Test
+ public final void testGetStageName() {
+ final String stageName = "testStageName";
+ final KinesisClientLibConfiguration clientConfig =
+ new KinesisClientLibConfiguration(stageName, null, null, null);
+ Worker worker =
+ new Worker(mock(com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory.class),
+ clientConfig);
+ Assert.assertEquals(stageName, worker.getApplicationName());
+ }
+
+ @Test
+ public final void testCreateOrGetShardConsumer() {
+ final String stageName = "testStageName";
+ IRecordProcessorFactory streamletFactory = SAMPLE_RECORD_PROCESSOR_FACTORY_V2;
+ IKinesisProxy proxy = null;
+ ICheckpoint checkpoint = null;
+ int maxRecords = 1;
+ int idleTimeInMilliseconds = 1000;
+ StreamConfig streamConfig =
+ new StreamConfig(proxy,
+ maxRecords,
+ idleTimeInMilliseconds,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+ final String testConcurrencyToken = "testToken";
+ final String anotherConcurrencyToken = "anotherTestToken";
+ final String dummyKinesisShardId = "kinesis-0-0";
+ ExecutorService execService = null;
+
+ KinesisClientLibLeaseCoordinator leaseCoordinator = mock(KinesisClientLibLeaseCoordinator.class);
+ @SuppressWarnings("unchecked")
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager);
+
+ Worker worker =
+ new Worker(stageName,
+ streamletFactory,
+ streamConfig,
+ InitialPositionInStream.LATEST,
+ parentShardPollIntervalMillis,
+ shardSyncIntervalMillis,
+ cleanupLeasesUponShardCompletion,
+ checkpoint,
+ leaseCoordinator,
+ execService,
+ nullMetricsFactory,
+ taskBackoffTimeMillis,
+ failoverTimeMillis);
+ ShardInfo shardInfo = new ShardInfo(dummyKinesisShardId, testConcurrencyToken, null);
+ ShardConsumer consumer = worker.createOrGetShardConsumer(shardInfo, streamletFactory);
+ Assert.assertNotNull(consumer);
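+ // Asking again with the same ShardInfo (same concurrency token) should return the cached consumer.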
+ ShardConsumer consumer2 = worker.createOrGetShardConsumer(shardInfo, streamletFactory);
+ Assert.assertSame(consumer, consumer2);
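+ // A different concurrency token for the same shard id should produce a distinct consumer.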
+ ShardInfo shardInfoWithSameShardIdButDifferentConcurrencyToken =
+ new ShardInfo(dummyKinesisShardId, anotherConcurrencyToken, null);
+ ShardConsumer consumer3 =
+ worker.createOrGetShardConsumer(shardInfoWithSameShardIdButDifferentConcurrencyToken, streamletFactory);
+ Assert.assertNotNull(consumer3);
+ Assert.assertNotSame(consumer3, consumer);
+ }
+
+ @Test
+ public final void testCleanupShardConsumers() {
+ final String stageName = "testStageName";
+ IRecordProcessorFactory streamletFactory = SAMPLE_RECORD_PROCESSOR_FACTORY_V2;
+ IKinesisProxy proxy = null;
+ ICheckpoint checkpoint = null;
+ int maxRecords = 1;
+ int idleTimeInMilliseconds = 1000;
+ StreamConfig streamConfig =
+ new StreamConfig(proxy,
+ maxRecords,
+ idleTimeInMilliseconds,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+ final String concurrencyToken = "testToken";
+ final String anotherConcurrencyToken = "anotherTestToken";
+ final String dummyKinesisShardId = "kinesis-0-0";
+ final String anotherDummyKinesisShardId = "kinesis-0-1";
+ ExecutorService execService = null;
+
+ KinesisClientLibLeaseCoordinator leaseCoordinator = mock(KinesisClientLibLeaseCoordinator.class);
+ @SuppressWarnings("unchecked")
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager);
+
+ Worker worker =
+ new Worker(stageName,
+ streamletFactory,
+ streamConfig,
+ InitialPositionInStream.LATEST,
+ parentShardPollIntervalMillis,
+ shardSyncIntervalMillis,
+ cleanupLeasesUponShardCompletion,
+ checkpoint,
+ leaseCoordinator,
+ execService,
+ nullMetricsFactory,
+ taskBackoffTimeMillis,
+ failoverTimeMillis);
+
+ ShardInfo shardInfo1 = new ShardInfo(dummyKinesisShardId, concurrencyToken, null);
+ ShardInfo duplicateOfShardInfo1ButWithAnotherConcurrencyToken =
+ new ShardInfo(dummyKinesisShardId, anotherConcurrencyToken, null);
+ ShardInfo shardInfo2 = new ShardInfo(anotherDummyKinesisShardId, concurrencyToken, null);
+
+ ShardConsumer consumerOfShardInfo1 = worker.createOrGetShardConsumer(shardInfo1, streamletFactory);
+ ShardConsumer consumerOfDuplicateOfShardInfo1ButWithAnotherConcurrencyToken =
+ worker.createOrGetShardConsumer(duplicateOfShardInfo1ButWithAnotherConcurrencyToken, streamletFactory);
+ ShardConsumer consumerOfShardInfo2 = worker.createOrGetShardConsumer(shardInfo2, streamletFactory);
+
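+ // Report only shardInfo1 and shardInfo2 as the currently assigned shards.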
+ Set<ShardInfo> assignedShards = new HashSet<ShardInfo>();
+ assignedShards.add(shardInfo1);
+ assignedShards.add(shardInfo2);
+ worker.cleanupShardConsumers(assignedShards);
+
+ // verify shard consumer not present in assignedShards is shut down
+ Assert.assertTrue(consumerOfDuplicateOfShardInfo1ButWithAnotherConcurrencyToken.isBeginShutdown());
+ // verify shard consumers present in assignedShards aren't shut down
+ Assert.assertFalse(consumerOfShardInfo1.isBeginShutdown());
+ Assert.assertFalse(consumerOfShardInfo2.isBeginShutdown());
+ }
+
+ @Test
+ public final void testInitializationFailureWithRetries() {
+ String stageName = "testInitializationWorker";
+ IRecordProcessorFactory recordProcessorFactory = new TestStreamletFactory(null, null);
+ IKinesisProxy proxy = mock(IKinesisProxy.class);
+ int count = 0;
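+ // Make every shard-list call fail so the worker's initial shard sync has to retry.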
+ when(proxy.getShardList()).thenThrow(new RuntimeException(Integer.toString(count++)));
+ int maxRecords = 2;
+ long idleTimeInMilliseconds = 1L;
+ StreamConfig streamConfig =
+ new StreamConfig(proxy,
+ maxRecords,
+ idleTimeInMilliseconds,
+ callProcessRecordsForEmptyRecordList,
+ skipCheckpointValidationValue,
+ initialPositionInStream);
+ KinesisClientLibLeaseCoordinator leaseCoordinator = mock(KinesisClientLibLeaseCoordinator.class);
+ @SuppressWarnings("unchecked")
+ ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
+ when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager);
+ ExecutorService execService = Executors.newSingleThreadExecutor();
+ long shardPollInterval = 0L;
+ Worker worker =
+ new Worker(stageName,
+ recordProcessorFactory,
+ streamConfig,
+ InitialPositionInStream.TRIM_HORIZON,
+ shardPollInterval,
+ shardSyncIntervalMillis,
+ cleanupLeasesUponShardCompletion,
+ leaseCoordinator,
+ leaseCoordinator,
+ execService,
+ nullMetricsFactory,
+ taskBackoffTimeMillis,
+ failoverTimeMillis);
+ worker.run();
+ Assert.assertTrue(count > 0);
+ }
+
+ /**
+ * Runs worker with threadPoolSize == numShards
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
+ */
+ @Test
+ public final void testRunWithThreadPoolSizeEqualToNumShards() throws Exception {
+ final int numShards = 1;
+ final int threadPoolSize = numShards;
+ runAndTestWorker(numShards, threadPoolSize);
+ }
+
+ /**
+ * Runs worker with threadPoolSize < numShards
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
+ */
+ @Test
+ public final void testRunWithThreadPoolSizeLessThanNumShards() throws Exception {
+ final int numShards = 3;
+ final int threadPoolSize = 2;
+ runAndTestWorker(numShards, threadPoolSize);
+ }
+
+ /**
+ * Runs worker with threadPoolSize > numShards
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
+ */
+ @Test
+ public final void testRunWithThreadPoolSizeMoreThanNumShards() throws Exception {
+ final int numShards = 3;
+ final int threadPoolSize = 5;
+ runAndTestWorker(numShards, threadPoolSize);
+ }
+
+ /**
+ * Runs worker against a shard list containing one shard split, with threadPoolSize < numShards.
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
+ */
+ @Test
+ public final void testOneSplitShard2Threads() throws Exception {
+ final int threadPoolSize = 2;
+ final int numberOfRecordsPerShard = 10;
+ List<Shard> shardList = createShardListWithOneSplit();
+ List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
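+ // Seed the lease table with a lease for the first shard, checkpointed at sequence number 2.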
+ KinesisClientLease lease = ShardSyncer.newKCLLease(shardList.get(0));
+ lease.setCheckpoint(new ExtendedSequenceNumber("2"));
+ initialLeases.add(lease);
+ runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard);
+ }
+
+ /**
+ * Runs worker against a shard list containing one shard split, with threadPoolSize < numShards and
+ * processRecords also invoked for empty record lists.
+ * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
+ */
+ @Test
+ public final void testOneSplitShard2ThreadsWithCallsForEmptyRecords() throws Exception {
+ final int threadPoolSize = 2;
+ final int numberOfRecordsPerShard = 10;
+ List