Added Unit and Integration Tests
Added unit tests for many of the classes in the client, along with the new test dependencies they require in the POM file. The unit tests can be executed with the `surefire:test` goal and run during the test phase. Added integration tests for the client; these require credentials that can interact with Kinesis and DynamoDB. The integration tests can be run by executing the `failsafe:integration-test` goal, or as part of the integration-test phase. **Resources created by the integration tests are not currently released when the tests complete.**
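For reference, a minimal sketch of how the two suites can be invoked with Maven under this setup (assuming AWS credentials for Kinesis and DynamoDB are available to the integration tests):

```sh
# Unit tests only: Surefire is bound to the test phase and excludes **/*IntegrationTest.java
mvn test

# Integration tests: Failsafe runs **/*IntegrationTest.java in the integration-test phase
# and checks the results during verify
mvn verify

# The goals can also be invoked directly
mvn surefire:test
mvn failsafe:integration-test
```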
parent dd14bac4e3
commit 33cec94530

55 changed files with 10415 additions and 95 deletions
.gitignore (vendored): 1 addition
@@ -1,2 +1,3 @@
 target/
 AwsCredentials.properties
+.idea
pom.xml: 258 changed lines
@@ -1,112 +1,180 @@ (pom.xml after this commit)

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.amazonaws</groupId>
    <artifactId>amazon-kinesis-client</artifactId>
    <packaging>jar</packaging>
    <name>Amazon Kinesis Client Library for Java</name>
    <version>1.6.4</version>
    <description>The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data
        from Amazon Kinesis.
    </description>
    <url>https://aws.amazon.com/kinesis</url>

    <scm>
        <url>https://github.com/awslabs/amazon-kinesis-client.git</url>
    </scm>

    <licenses>
        <license>
            <name>Amazon Software License</name>
            <url>https://aws.amazon.com/asl</url>
            <distribution>repo</distribution>
        </license>
    </licenses>

    <properties>
        <aws-java-sdk.version>1.11.14</aws-java-sdk.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk-core</artifactId>
            <version>${aws-java-sdk.version}</version>
        </dependency>
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk-dynamodb</artifactId>
            <version>${aws-java-sdk.version}</version>
        </dependency>
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk-kinesis</artifactId>
            <version>${aws-java-sdk.version}</version>
        </dependency>
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk-cloudwatch</artifactId>
            <version>${aws-java-sdk.version}</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>18.0</version>
        </dependency>
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
            <version>2.6.1</version>
        </dependency>
        <dependency>
            <groupId>commons-lang</groupId>
            <artifactId>commons-lang</artifactId>
            <version>2.6</version>
        </dependency>

        <!-- Test -->
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-all</artifactId>
            <version>1.10.19</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-all</artifactId>
            <version>1.3</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>DynamoDBLocal</artifactId>
            <version>1.10.5.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <repositories>
        <repository>
            <id>dynamodb-local</id>
            <name>DynamoDB Local Release Repository</name>
            <url>http://dynamodb-local.s3-website-us-west-2.amazonaws.com/release</url>
        </repository>
    </repositories>

    <developers>
        <developer>
            <id>amazonwebservices</id>
            <organization>Amazon Web Services</organization>
            <organizationUrl>https://aws.amazon.com</organizationUrl>
            <roles>
                <role>developer</role>
            </roles>
        </developer>
    </developers>

    <build>
        <pluginManagement>
            <plugins>
                <plugin>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-compiler-plugin</artifactId>
                    <version>3.2</version>
                    <configuration>
                        <source>1.7</source>
                        <target>1.7</target>
                        <encoding>UTF-8</encoding>
                    </configuration>
                </plugin>
            </plugins>
        </pluginManagement>

        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-gpg-plugin</artifactId>
                <version>1.5</version>
                <executions>
                    <execution>
                        <id>sign-artifacts</id>
                        <phase>verify</phase>
                        <goals>
                            <goal>sign</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.19.1</version>
                <configuration>
                    <excludes>
                        <exclude>**/*IntegrationTest.java</exclude>
                    </excludes>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-failsafe-plugin</artifactId>
                <version>2.19.1</version>
                <configuration>
                    <includes>
                        <include>**/*IntegrationTest.java</include>
                    </includes>
                </configuration>
                <executions>
                    <execution>
                        <goals>
                            <goal>integration-test</goal>
                            <goal>verify</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>
KinesisClientLibConfiguratorTest.java (new file)
@@ -0,0 +1,369 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.config;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.Set;

import org.apache.commons.lang.StringUtils;
import org.junit.Test;

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import com.google.common.collect.ImmutableSet;

public class KinesisClientLibConfiguratorTest {

    private String credentialName1 =
            "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProvider";
    private String credentialName2 =
            "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysFailCredentialsProvider";
    private KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();

    @Test
    public void testWithBasicSetup() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = a",
                        "applicationName = b",
                        "AWSCredentialsProvider = " + credentialName1,
                        "workerId = 123"
                }, '\n'));
        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "a");
        assertEquals(config.getWorkerIdentifier(), "123");
    }

    @Test
    public void testWithLongVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "applicationName = app",
                        "streamName = 123",
                        "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                        "workerId = 123",
                        "failoverTimeMillis = 100",
                        "shardSyncIntervalMillis = 500"
                }, '\n'));

        assertEquals(config.getApplicationName(), "app");
        assertEquals(config.getStreamName(), "123");
        assertEquals(config.getWorkerIdentifier(), "123");
        assertEquals(config.getFailoverTimeMillis(), 100);
        assertEquals(config.getShardSyncIntervalMillis(), 500);
    }

    @Test
    public void testWithUnsupportedClientConfigurationVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                        "workerId = id",
                        "kinesisClientConfig = {}",
                        "streamName = stream",
                        "applicationName = b"
                }, '\n'));

        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "stream");
        assertEquals(config.getWorkerIdentifier(), "id");
        // by setting the configuration there is no effect on kinesisClientConfiguration variable.
    }

    @Test
    public void testWithIntVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = kinesis",
                        "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1,
                        "workerId = w123",
                        "maxRecords = 10",
                        "metricsMaxQueueSize = 20",
                        "applicationName = kinesis"
                }, '\n'));

        assertEquals(config.getApplicationName(), "kinesis");
        assertEquals(config.getStreamName(), "kinesis");
        assertEquals(config.getWorkerIdentifier(), "w123");
        assertEquals(config.getMaxRecords(), 10);
        assertEquals(config.getMetricsMaxQueueSize(), 20);
    }

    @Test
    public void testWithBooleanVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = a",
                        "applicationName = b",
                        "AWSCredentialsProvider = ABCD, " + credentialName1,
                        "workerId = 0",
                        "cleanupLeasesUponShardCompletion = false",
                        "validateSequenceNumberBeforeCheckpointing = true"
                }, '\n'));

        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "a");
        assertEquals(config.getWorkerIdentifier(), "0");
        assertFalse(config.shouldCleanupLeasesUponShardCompletion());
        assertTrue(config.shouldValidateSequenceNumberBeforeCheckpointing());
    }

    @Test
    public void testWithStringVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = a",
                        "applicationName = b",
                        "AWSCredentialsProvider = ABCD," + credentialName1,
                        "workerId = 1",
                        "kinesisEndpoint = https://kinesis",
                        "metricsLevel = SUMMARY"
                }, '\n'));

        assertEquals(config.getWorkerIdentifier(), "1");
        assertEquals(config.getKinesisEndpoint(), "https://kinesis");
        assertEquals(config.getMetricsLevel(), MetricsLevel.SUMMARY);
    }

    @Test
    public void testWithSetVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = a",
                        "applicationName = b",
                        "AWSCredentialsProvider = ABCD," + credentialName1,
                        "workerId = 1",
                        "metricsEnabledDimensions = ShardId, WorkerIdentifier"
                }, '\n'));

        Set<String> expectedMetricsEnabledDimensions = ImmutableSet.<String>builder().add(
                "ShardId", "WorkerIdentifier").addAll(
                KinesisClientLibConfiguration.METRICS_ALWAYS_ENABLED_DIMENSIONS).build();
        assertEquals(config.getMetricsEnabledDimensions(), expectedMetricsEnabledDimensions);
    }

    @Test
    public void testWithInitialPositionInStreamVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = a",
                        "applicationName = b",
                        "AWSCredentialsProvider = ABCD," + credentialName1,
                        "workerId = 123",
                        "initialPositionInStream = TriM_Horizon"
                }, '\n'));

        assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON);
    }

    @Test
    public void testSkippingNonKCLVariables() {
        KinesisClientLibConfiguration config =
                getConfiguration(StringUtils.join(new String[] {
                        "streamName = a",
                        "applicationName = b",
                        "AWSCredentialsProvider = ABCD," + credentialName1,
                        "workerId = 123",
                        "initialPositionInStream = TriM_Horizon",
                        "abc = 1"
                }, '\n'));

        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "a");
        assertEquals(config.getWorkerIdentifier(), "123");
        assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON);
    }

    @Test
    public void testWithInvalidIntValue() {
        String test = StringUtils.join(new String[] {
                "streamName = a",
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123",
                "failoverTimeMillis = 100nf"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        try {
            configurator.getConfiguration(input);
        } catch (Exception e) {
            fail("Don't expect to fail on invalid variable value");
        }
    }

    @Test
    public void testWithNegativeIntValue() {
        String test = StringUtils.join(new String[] {
                "streamName = a",
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123",
                "failoverTimeMillis = -12"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            configurator.getConfiguration(input);
        } catch (Exception e) {
            fail("Don't expect to fail on invalid variable value");
        }
    }

    @Test
    public void testWithMissingCredentialsProvider() {
        String test = StringUtils.join(new String[] {
                "streamName = a",
                "applicationName = b",
                "workerId = 123",
                "failoverTimeMillis = 100",
                "shardSyncIntervalMillis = 500"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            configurator.getConfiguration(input);
            fail("expect failure with no credentials provider variables");
        } catch (Exception e) {
            // succeed
        }
    }

    @Test
    public void testWithMissingWorkerId() {
        String test = StringUtils.join(new String[] {
                "streamName = a",
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1,
                "failoverTimeMillis = 100",
                "shardSyncIntervalMillis = 500"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());
        KinesisClientLibConfiguration config = configurator.getConfiguration(input);

        // if workerId is not provided, configurator should assign one for it automatically
        assertNotNull(config.getWorkerIdentifier());
        assertFalse(config.getWorkerIdentifier().isEmpty());
    }

    @Test
    public void testWithMissingStreamName() {
        String test = StringUtils.join(new String[] {
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123",
                "failoverTimeMillis = 100"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            configurator.getConfiguration(input);
            fail("expect failure with no stream name variables");
        } catch (Exception e) {
            // succeed
        }
    }

    @Test
    public void testWithMissingApplicationName() {
        String test = StringUtils.join(new String[] {
                "streamName = a",
                "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123",
                "failoverTimeMillis = 100"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            configurator.getConfiguration(input);
            fail("expect failure with no application variables");
        } catch (Exception e) {
            // succeed
        }
    }

    @Test
    public void testWithAWSCredentialsFailed() {
        String test = StringUtils.join(new String[] {
                "streamName = a",
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName2,
                "failoverTimeMillis = 100",
                "shardSyncIntervalMillis = 500"
        }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            KinesisClientLibConfiguration config = configurator.getConfiguration(input);
            config.getKinesisCredentialsProvider().getCredentials();
            fail("expect failure with wrong credentials provider");
        } catch (Exception e) {
            // succeed
        }
    }

    /**
     * This credentials provider will always succeed
     */
    public static class AlwaysSucceedCredentialsProvider implements AWSCredentialsProvider {

        @Override
        public AWSCredentials getCredentials() {
            return null;
        }

        @Override
        public void refresh() {
        }

    }

    /**
     * This credentials provider will always fail
     */
    public static class AlwaysFailCredentialsProvider implements AWSCredentialsProvider {

        @Override
        public AWSCredentials getCredentials() {
            throw new IllegalArgumentException();
        }

        @Override
        public void refresh() {
        }

    }

    private KinesisClientLibConfiguration getConfiguration(String configString) {
        InputStream input = new ByteArrayInputStream(configString.getBytes());
        KinesisClientLibConfiguration config = configurator.getConfiguration(input);
        return config;
    }
}
CheckpointImplTestBase.java (new file)
@@ -0,0 +1,112 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;

/**
 * Base class for unit testing checkpoint implementations.
 * This class has tests common to InMemory and FileBased implementations.
 */
public abstract class CheckpointImplTestBase {

    protected final String startingSequenceNumber = "0001000";
    protected final String testConcurrencyToken = "testToken";
    protected ICheckpoint checkpoint;

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        MetricsHelper.startScope(new NullMetricsFactory(), "CheckpointImplTestBase");
    }

    /**
     * @throws java.lang.Exception
     */
    @AfterClass
    public static void tearDownAfterClass() throws Exception {
    }

    /**
     * Constructor.
     */
    public CheckpointImplTestBase() {
        super();
    }

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
    }

    @Test
    public final void testInitialSetCheckpoint() throws Exception {
        String sequenceNumber = "1";
        String shardId = "myShardId";
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber);
        checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken);
        ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId);
        Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint);
    }

    @Test
    public final void testAdvancingSetCheckpoint() throws Exception {
        String shardId = "myShardId";
        for (Integer i = 0; i < 10; i++) {
            String sequenceNumber = i.toString();
            ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber);
            checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken);
            ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId);
            Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint);
        }
    }

    /**
     * Test method to verify setCheckpoint and getCheckpoint methods.
     *
     * @throws Exception
     */
    @Test
    public final void testSetAndGetCheckpoint() throws Exception {
        String checkpointValue = "12345";
        String shardId = "testShardId-1";
        String concurrencyToken = "token-1";
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue);
        checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken);
        Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
    }

}
InMemoryCheckpointImpl.java (new file)
@@ -0,0 +1,123 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;

/**
 * Everything is stored in memory and there is no fault-tolerance.
 */
public class InMemoryCheckpointImpl implements ICheckpoint {

    private static final Log LOG = LogFactory.getLog(InMemoryCheckpointImpl.class);

    private Map<String, ExtendedSequenceNumber> checkpoints = new HashMap<>();
    private Map<String, ExtendedSequenceNumber> flushpoints = new HashMap<>();
    private final String startingSequenceNumber;

    /**
     * Constructor.
     *
     * @param startingSequenceNumber Initial checkpoint will be set to this sequenceNumber (for all shards).
     */
    public InMemoryCheckpointImpl(String startingSequenceNumber) {
        super();
        this.startingSequenceNumber = startingSequenceNumber;
    }

    ExtendedSequenceNumber getLastCheckpoint(String shardId) {
        ExtendedSequenceNumber checkpoint = checkpoints.get(shardId);
        if (checkpoint == null) {
            checkpoint = new ExtendedSequenceNumber(startingSequenceNumber);
        }
        LOG.debug("getLastCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint);
        return checkpoint;
    }

    ExtendedSequenceNumber getLastFlushpoint(String shardId) {
        ExtendedSequenceNumber flushpoint = flushpoints.get(shardId);
        LOG.debug("getLastFlushpoint shardId: " + shardId + " flushpoint: " + flushpoint);
        return flushpoint;
    }

    void resetCheckpointToLastFlushpoint(String shardId) throws KinesisClientLibException {
        ExtendedSequenceNumber currentFlushpoint = flushpoints.get(shardId);
        if (currentFlushpoint == null) {
            checkpoints.put(shardId, new ExtendedSequenceNumber(startingSequenceNumber));
        } else {
            checkpoints.put(shardId, currentFlushpoint);
        }
    }

    ExtendedSequenceNumber getGreatestPrimaryFlushpoint(String shardId) throws KinesisClientLibException {
        verifyNotEmpty(shardId, "shardId must not be null.");
        ExtendedSequenceNumber greatestFlushpoint = getLastFlushpoint(shardId);
        if (LOG.isDebugEnabled()) {
            LOG.debug("getGreatestPrimaryFlushpoint value for shardId " + shardId + " = " + greatestFlushpoint);
        }
        return greatestFlushpoint;
    }

    ExtendedSequenceNumber getRestartPoint(String shardId) {
        verifyNotEmpty(shardId, "shardId must not be null.");
        ExtendedSequenceNumber restartPoint = getLastCheckpoint(shardId);
        if (LOG.isDebugEnabled()) {
            LOG.debug("getRestartPoint value for shardId " + shardId + " = " + restartPoint);
        }
        return restartPoint;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken)
        throws KinesisClientLibException {
        checkpoints.put(shardId, checkpointValue);
        flushpoints.put(shardId, checkpointValue);

        if (LOG.isDebugEnabled()) {
            LOG.debug("shardId: " + shardId + " checkpoint: " + checkpointValue);
        }

    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException {
        ExtendedSequenceNumber checkpoint = flushpoints.get(shardId);
        LOG.debug("getCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint);
        return checkpoint;
    }

    /** Check that string is neither null nor empty.
     */
    static void verifyNotEmpty(String string, String message) {
        if ((string == null) || (string.isEmpty())) {
            throw new IllegalArgumentException(message);
        }
    }

}
InMemoryCheckpointImplTest.java (new file)
@@ -0,0 +1,38 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;

import org.junit.Before;

/**
 * Test the InMemoryCheckpointImpl class.
 */
public class InMemoryCheckpointImplTest extends CheckpointImplTestBase {

    /**
     * Constructor.
     */
    public InMemoryCheckpointImplTest() {
        super();
    }

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
        checkpoint = new InMemoryCheckpointImpl(startingSequenceNumber);
    }

}
BlockOnParentShardTaskTest.java (new file)
@@ -0,0 +1,223 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;

import junit.framework.Assert;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;

/**
 *
 */
public class BlockOnParentShardTaskTest {

    private static final Log LOG = LogFactory.getLog(BlockOnParentShardTaskTest.class);
    private final long backoffTimeInMillis = 50L;
    private final String shardId = "shardId-97";
    private final String concurrencyToken = "testToken";
    private final List<String> emptyParentShardIds = new ArrayList<String>();
    ShardInfo defaultShardInfo = new ShardInfo(shardId, concurrencyToken, emptyParentShardIds);

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @AfterClass
    public static void tearDownAfterClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
    }

    /**
     * Test call() when there are no parent shards.
     * @throws ProvisionedThroughputException
     * @throws InvalidStateException
     * @throws DependencyException
     */
    @Test
    public final void testCallNoParents()
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
        when(leaseManager.getLease(shardId)).thenReturn(null);

        BlockOnParentShardTask task = new BlockOnParentShardTask(defaultShardInfo, leaseManager, backoffTimeInMillis);
        TaskResult result = task.call();
        Assert.assertNull(result.getException());
    }

    /**
     * Test call() when there are 1-2 parent shards that have been fully processed.
     * @throws ProvisionedThroughputException
     * @throws InvalidStateException
     * @throws DependencyException
     */
    @Test
    public final void testCallWhenParentsHaveFinished()
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {

        ShardInfo shardInfo = null;
        BlockOnParentShardTask task = null;
        String parent1ShardId = "shardId-1";
        String parent2ShardId = "shardId-2";
        List<String> parentShardIds = new ArrayList<>();
        TaskResult result = null;

        KinesisClientLease parent1Lease = new KinesisClientLease();
        parent1Lease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
        KinesisClientLease parent2Lease = new KinesisClientLease();
        parent2Lease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);

        ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
        when(leaseManager.getLease(parent1ShardId)).thenReturn(parent1Lease);
        when(leaseManager.getLease(parent2ShardId)).thenReturn(parent2Lease);

        // test single parent
        parentShardIds.add(parent1ShardId);
        shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
        task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
        result = task.call();
        Assert.assertNull(result.getException());

        // test two parents
        parentShardIds.add(parent2ShardId);
        shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
        task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
        result = task.call();
        Assert.assertNull(result.getException());
    }

    /**
     * Test call() when there are 1-2 parent shards that have NOT been fully processed.
     * @throws ProvisionedThroughputException
     * @throws InvalidStateException
     * @throws DependencyException
     */
    @Test
    public final void testCallWhenParentsHaveNotFinished()
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {

        ShardInfo shardInfo = null;
        BlockOnParentShardTask task = null;
        String parent1ShardId = "shardId-1";
        String parent2ShardId = "shardId-2";
        List<String> parentShardIds = new ArrayList<>();
        TaskResult result = null;

        KinesisClientLease parent1Lease = new KinesisClientLease();
        parent1Lease.setCheckpoint(ExtendedSequenceNumber.LATEST);
        KinesisClientLease parent2Lease = new KinesisClientLease();
        // mock a sequence number checkpoint
        parent2Lease.setCheckpoint(new ExtendedSequenceNumber("98182584034"));

        ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
        when(leaseManager.getLease(parent1ShardId)).thenReturn(parent1Lease);
        when(leaseManager.getLease(parent2ShardId)).thenReturn(parent2Lease);

        // test single parent
        parentShardIds.add(parent1ShardId);
        shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
        task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
        result = task.call();
        Assert.assertNotNull(result.getException());

        // test two parents
        parentShardIds.add(parent2ShardId);
        shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
        task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
        result = task.call();
        Assert.assertNotNull(result.getException());
    }

    /**
     * Test call() with 1 parent shard before and after it is completely processed.
     * @throws ProvisionedThroughputException
     * @throws InvalidStateException
     * @throws DependencyException
     */
    @Test
    public final void testCallBeforeAndAfterAParentFinishes()
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {

        BlockOnParentShardTask task = null;
        String parentShardId = "shardId-1";
        List<String> parentShardIds = new ArrayList<>();
        parentShardIds.add(parentShardId);
        ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds);
        TaskResult result = null;
        KinesisClientLease parentLease = new KinesisClientLease();
        ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
        when(leaseManager.getLease(parentShardId)).thenReturn(parentLease);

        // test when parent shard has not yet been fully processed
        parentLease.setCheckpoint(new ExtendedSequenceNumber("98182584034"));
        task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
        result = task.call();
        Assert.assertNotNull(result.getException());

        // test when parent has been fully processed
        parentLease.setCheckpoint(ExtendedSequenceNumber.SHARD_END);
        task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis);
        result = task.call();
        Assert.assertNull(result.getException());
    }

    /**
     * Test to verify we return the right task type.
     */
    @Test
    public final void testGetTaskType() {
        BlockOnParentShardTask task = new BlockOnParentShardTask(defaultShardInfo, null, backoffTimeInMillis);
        Assert.assertEquals(TaskType.BLOCK_ON_PARENT_SHARDS, task.getTaskType());
    }

}
CheckpointValueComparatorTest.java (new file)
@@ -0,0 +1,79 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import junit.framework.Assert;

import org.junit.Test;

import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;

public class CheckpointValueComparatorTest {

    @Test
    public final void testCheckpointValueComparator() {
        CheckpointValueComparator comparator = new CheckpointValueComparator();
        final String trimHorizon = SentinelCheckpoint.TRIM_HORIZON.toString();
        final String latest = SentinelCheckpoint.LATEST.toString();
        final String shardEnd = SentinelCheckpoint.SHARD_END.toString();
        final String lesser = "17";
        final String greater = "123";
        final String notASentinelCheckpointValue = "just-some-string";

        String[][] equalValues =
                { { trimHorizon, trimHorizon }, { latest, latest }, { greater, greater }, { shardEnd, shardEnd } };

        // Check equal values
        for (String[] pair : equalValues) {
            Assert.assertTrue("Expected: " + pair[0] + " and " + pair[1] + " to be equal",
                    comparator.compare(pair[0], pair[1]) == 0 && comparator.compare(pair[1], pair[0]) == 0);
        }

        // Check non-equal values
        String[][] lessThanValues =
                { { latest, lesser }, { trimHorizon, greater }, { lesser, greater },
                        { trimHorizon, shardEnd }, { latest, shardEnd }, { lesser, shardEnd }, { trimHorizon, latest } };
        for (String[] pair : lessThanValues) {
            Assert.assertTrue("Expected: " + pair[0] + " < " + pair[1],
                    comparator.compare(pair[0], pair[1]) < 0);
            Assert.assertTrue("Expected: " + pair[1] + " > " + pair[0],
                    comparator.compare(pair[1], pair[0]) > 0);
        }

        // Check bad values
        String[][] badValues =
                { { null, null }, { latest, null }, { null, trimHorizon }, { null, shardEnd }, { null, lesser },
                        { null, notASentinelCheckpointValue }, { latest, notASentinelCheckpointValue },
                        { notASentinelCheckpointValue, trimHorizon }, { shardEnd, notASentinelCheckpointValue },
                        { notASentinelCheckpointValue, lesser } };
        for (String[] pair : badValues) {
            try {
                comparator.compare(pair[0], pair[1]);
                Assert.fail("Compare should have thrown an exception when one of its parameters is not a sequence "
                        + "number and not a sentinel checkpoint value but didn't when comparing " + pair[0] + " and "
                        + pair[1]);
            } catch (Exception e1) {
                try {
                    comparator.compare(pair[1], pair[0]);
                    Assert.fail("Compare should have thrown an exception when one of its parameters is not a sequence "
                            + "number and not a sentinel checkpoint value but didn't when comparing " + pair[1]
                            + " and " + pair[0]);
                } catch (Exception e2) {
                    continue;
                }
            }
        }
    }
}
ExceptionThrowingLeaseManager.java (new file)
@@ -0,0 +1,215 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.Arrays;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;

/**
 * Mock Lease Manager by randomly throwing Leasing Exceptions.
 *
 */
class ExceptionThrowingLeaseManager implements ILeaseManager<KinesisClientLease> {
    private static final Log LOG = LogFactory.getLog(ExceptionThrowingLeaseManager.class);
    private static final Throwable EXCEPTION_MSG = new Throwable("Test Exception");

    // Use array below to control in what situations we want to throw exceptions.
    private int[] leaseManagerMethodCallingCount;

    /**
     * Methods which we support (simulate exceptions).
     */
    enum ExceptionThrowingLeaseManagerMethods {
        CREATELEASETABLEIFNOTEXISTS(0),
        LEASETABLEEXISTS(1),
        WAITUNTILLEASETABLEEXISTS(2),
        LISTLEASES(3),
        CREATELEASEIFNOTEXISTS(4),
        GETLEASE(5),
        RENEWLEASE(6),
        TAKELEASE(7),
        EVICTLEASE(8),
        DELETELEASE(9),
        DELETEALL(10),
        UPDATELEASE(11),
        NONE(Integer.MIN_VALUE);

        private Integer index;

        ExceptionThrowingLeaseManagerMethods(Integer index) {
            this.index = index;
        }

        Integer getIndex() {
            return this.index;
        }
    }

    // Define which method should throw exception and when it should throw exception.
    private ExceptionThrowingLeaseManagerMethods methodThrowingException = ExceptionThrowingLeaseManagerMethods.NONE;
    private int timeThrowingException = Integer.MAX_VALUE;

    // The real local lease manager which would do the real implementations.
    private final ILeaseManager<KinesisClientLease> leaseManager;

    /**
     * Constructor accepts lease manager as only argument.
     *
     * @param leaseManager which will do the real implementations
     */
    ExceptionThrowingLeaseManager(ILeaseManager<KinesisClientLease> leaseManager) {
        this.leaseManager = leaseManager;
        this.leaseManagerMethodCallingCount = new int[ExceptionThrowingLeaseManagerMethods.values().length];
    }

    /**
     * Set parameters used for throwing exception.
     *
     * @param method which would throw exception
     * @param throwingTime defines what time to throw exception
     */
    void setLeaseLeaseManagerThrowingExceptionScenario(ExceptionThrowingLeaseManagerMethods method, int throwingTime) {
        this.methodThrowingException = method;
        this.timeThrowingException = throwingTime;
    }

    /**
     * Reset all parameters used for throwing exception.
     */
    void clearLeaseManagerThrowingExceptionScenario() {
        Arrays.fill(leaseManagerMethodCallingCount, 0);
        this.methodThrowingException = ExceptionThrowingLeaseManagerMethods.NONE;
        this.timeThrowingException = Integer.MAX_VALUE;
    }

    // Throw exception when the conditions are satisfied :
    // 1). method equals to methodThrowingException
    // 2). method calling count equals to what we want
    private void throwExceptions(String methodName, ExceptionThrowingLeaseManagerMethods method)
        throws DependencyException {
        // Increase calling count for this method
        leaseManagerMethodCallingCount[method.getIndex()]++;
        if (method.equals(methodThrowingException)
                && (leaseManagerMethodCallingCount[method.getIndex()] == timeThrowingException)) {
            // Throw Dependency Exception if all conditions are satisfied.
            LOG.debug("Throwing DependencyException in " + methodName);
            throw new DependencyException(EXCEPTION_MSG);
        }
    }

    @Override
    public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity)
        throws ProvisionedThroughputException, DependencyException {
        throwExceptions("createLeaseTableIfNotExists",
                ExceptionThrowingLeaseManagerMethods.CREATELEASETABLEIFNOTEXISTS);

        return leaseManager.createLeaseTableIfNotExists(readCapacity, writeCapacity);
    }

    @Override
    public boolean leaseTableExists() throws DependencyException {
        throwExceptions("leaseTableExists", ExceptionThrowingLeaseManagerMethods.LEASETABLEEXISTS);

        return leaseManager.leaseTableExists();
    }

    @Override
    public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException {
        throwExceptions("waitUntilLeaseTableExists", ExceptionThrowingLeaseManagerMethods.WAITUNTILLEASETABLEEXISTS);

        return leaseManager.waitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds);
    }

    @Override
    public List<KinesisClientLease> listLeases()
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("listLeases", ExceptionThrowingLeaseManagerMethods.LISTLEASES);

        return leaseManager.listLeases();
    }

    @Override
    public boolean createLeaseIfNotExists(KinesisClientLease lease)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("createLeaseIfNotExists", ExceptionThrowingLeaseManagerMethods.CREATELEASEIFNOTEXISTS);

        return leaseManager.createLeaseIfNotExists(lease);
    }

    @Override
    public boolean renewLease(KinesisClientLease lease)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("renewLease", ExceptionThrowingLeaseManagerMethods.RENEWLEASE);

        return leaseManager.renewLease(lease);
    }

    @Override
    public boolean takeLease(KinesisClientLease lease, String owner)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("takeLease", ExceptionThrowingLeaseManagerMethods.TAKELEASE);

        return leaseManager.takeLease(lease, owner);
    }

    @Override
    public boolean evictLease(KinesisClientLease lease)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("evictLease", ExceptionThrowingLeaseManagerMethods.EVICTLEASE);

        return leaseManager.evictLease(lease);
    }

    @Override
    public void deleteLease(KinesisClientLease lease)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("deleteLease", ExceptionThrowingLeaseManagerMethods.DELETELEASE);

        leaseManager.deleteLease(lease);
    }

    @Override
    public boolean updateLease(KinesisClientLease lease)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        throwExceptions("updateLease", ExceptionThrowingLeaseManagerMethods.UPDATELEASE);

        return leaseManager.updateLease(lease);
    }

    @Override
    public KinesisClientLease getLease(String shardId)
        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
|
||||||
|
throwExceptions("getLease", ExceptionThrowingLeaseManagerMethods.GETLEASE);
|
||||||
|
|
||||||
|
return leaseManager.getLease(shardId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
|
||||||
|
throwExceptions("deleteAll", ExceptionThrowingLeaseManagerMethods.DELETEALL);
|
||||||
|
|
||||||
|
leaseManager.deleteAll();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
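ExceptionThrowingLeaseManager above is a fault-injecting decorator: every call is delegated to the wrapped lease manager, and a single DependencyException is thrown once the selected method reaches the selected call count. A minimal usage sketch follows (not part of this commit; the realLeaseManager parameter and the chosen scenario values are illustrative):

    // Hypothetical helper showing how a test could drive the decorator.
    void exampleFaultInjection(ILeaseManager<KinesisClientLease> realLeaseManager) throws Exception {
        ExceptionThrowingLeaseManager faultInjectingManager =
                new ExceptionThrowingLeaseManager(realLeaseManager);
        // Fail the third call to listLeases() with a DependencyException.
        faultInjectingManager.setLeaseLeaseManagerThrowingExceptionScenario(
                ExceptionThrowingLeaseManager.ExceptionThrowingLeaseManagerMethods.LISTLEASES, 3);
        // ... exercise the component under test against faultInjectingManager ...
        // Reset call counters and disable the scenario afterwards.
        faultInjectingManager.clearLeaseManagerThrowingExceptionScenario();
    }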
@@ -0,0 +1,349 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import static org.junit.Assert.assertEquals;
import junit.framework.Assert;

import org.junit.Test;
import org.mockito.Mockito;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.AmazonKinesisClient;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import com.google.common.collect.ImmutableSet;

public class KinesisClientLibConfigurationTest {
    private static final long INVALID_LONG = 0L;
    private static final int INVALID_INT = 0;

    private static final long TEST_VALUE_LONG = 1000L;
    private static final int TEST_VALUE_INT = 1000;
    private static final int PARAMETER_COUNT = 6;

    private static final String TEST_STRING = "TestString";
    private static final String ALTER_STRING = "AlterString";

    // We don't want any of these tests to run checkpoint validation
    private static final boolean skipCheckpointValidationValue = false;

    @Test
    public void testKCLConfigurationConstructorWithCorrectParamters() {
        // Test the first two constructors with default values.
        // All of them should be positive.
        @SuppressWarnings("unused")
        KinesisClientLibConfiguration config =
                new KinesisClientLibConfiguration(TEST_STRING, TEST_STRING, null, TEST_STRING);

        // Test constructor with all valid arguments.
        config =
                new KinesisClientLibConfiguration(TEST_STRING,
                        TEST_STRING,
                        TEST_STRING,
                        null,
                        null,
                        null,
                        null,
                        TEST_VALUE_LONG,
                        TEST_STRING,
                        TEST_VALUE_INT,
                        TEST_VALUE_LONG,
                        false,
                        TEST_VALUE_LONG,
                        TEST_VALUE_LONG,
                        true,
                        new ClientConfiguration(),
                        new ClientConfiguration(),
                        new ClientConfiguration(),
                        TEST_VALUE_LONG,
                        TEST_VALUE_LONG,
                        TEST_VALUE_INT,
                        skipCheckpointValidationValue,
                        null);
    }

    @Test
    public void testKCLConfigurationConstructorWithInvalidParamter() {
        // Test constructor with invalid parameters.
        // Initialization should throw an error on invalid argument.
        // Try each argument at one time.
        KinesisClientLibConfiguration config = null;
        long[] longValues =
                { TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG };
        for (int i = 0; i < PARAMETER_COUNT; i++) {
            longValues[i] = INVALID_LONG;
            try {
                config =
                        new KinesisClientLibConfiguration(TEST_STRING,
                                TEST_STRING,
                                TEST_STRING,
                                null,
                                null,
                                null,
                                null,
                                longValues[0],
                                TEST_STRING,
                                TEST_VALUE_INT,
                                longValues[1],
                                false,
                                longValues[2],
                                longValues[3],
                                true,
                                new ClientConfiguration(),
                                new ClientConfiguration(),
                                new ClientConfiguration(),
                                longValues[4],
                                longValues[5],
                                TEST_VALUE_INT,
                                skipCheckpointValidationValue,
                                null);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
            longValues[i] = TEST_VALUE_LONG;
        }
        int[] intValues = { TEST_VALUE_INT, TEST_VALUE_INT };
        for (int i = 0; i < 2; i++) {
            intValues[i] = INVALID_INT;
            try {
                config =
                        new KinesisClientLibConfiguration(TEST_STRING,
                                TEST_STRING,
                                TEST_STRING,
                                null,
                                null,
                                null,
                                null,
                                TEST_VALUE_LONG,
                                TEST_STRING,
                                intValues[0],
                                TEST_VALUE_LONG,
                                false,
                                TEST_VALUE_LONG,
                                TEST_VALUE_LONG,
                                true,
                                new ClientConfiguration(),
                                new ClientConfiguration(),
                                new ClientConfiguration(),
                                TEST_VALUE_LONG,
                                TEST_VALUE_LONG,
                                intValues[1],
                                skipCheckpointValidationValue,
                                null);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
            intValues[i] = TEST_VALUE_INT;
        }
        Assert.assertTrue("KCLConfiguration should return null when using negative arguments", config == null);
    }

    @Test
    public void testKCLConfigurationUserAgent() {
        // There are three ways of setting the user agent:
        // 1) Use client configuration default user agent;
        // 2) Pass client configurations;
        // 3) Pass user agent.
        // For each case, after building KCLConfiguration, KINESIS_CLIENT_LIB_USER_AGENT
        // should be included in user agent.

        // Default user agent should be "appName,KINESIS_CLIENT_LIB_USER_AGENT"
        String expectedUserAgent = TEST_STRING + "," + KinesisClientLibConfiguration.KINESIS_CLIENT_LIB_USER_AGENT;
        KinesisClientLibConfiguration config =
                new KinesisClientLibConfiguration(TEST_STRING, TEST_STRING, null, TEST_STRING);
        testContainingKCLUserAgent(config, expectedUserAgent);
        ClientConfiguration clientConfig = new ClientConfiguration();
        config.withCommonClientConfig(clientConfig);
        testContainingKCLUserAgent(config, expectedUserAgent);

        // Use alter string to replace app name in KCLConfiguration user agent.
        expectedUserAgent = ALTER_STRING + "," + KinesisClientLibConfiguration.KINESIS_CLIENT_LIB_USER_AGENT;
        clientConfig.setUserAgent(ALTER_STRING);
        config.withCommonClientConfig(clientConfig);
        testContainingKCLUserAgent(config, expectedUserAgent);
        config.withUserAgent(ALTER_STRING);
        testContainingKCLUserAgent(config, expectedUserAgent);
    }

    // Every aws client configuration in KCL configuration should contain expected user agent
    private static void testContainingKCLUserAgent(KinesisClientLibConfiguration config, String expectedUserAgent) {
        Assert.assertTrue("Kinesis client should contain expected User Agent", config.getKinesisClientConfiguration()
                .getUserAgent()
                .contains(expectedUserAgent));
        Assert.assertTrue("DynamoDB client should contain expected User Agent", config.getDynamoDBClientConfiguration()
                .getUserAgent()
                .contains(expectedUserAgent));
        Assert.assertTrue("CloudWatch client should contain expected User Agent",
                config.getCloudWatchClientConfiguration().getUserAgent().contains(expectedUserAgent));
    }

    @Test
    public void testKCLConfigurationWithOnlyRegionPropertyProvided() {
        // test if the setRegion method has been called once for each of the
        // clients by setting only the region name
        AmazonKinesisClient kclient = Mockito.mock(AmazonKinesisClient.class);
        AmazonDynamoDBClient dclient = Mockito.mock(AmazonDynamoDBClient.class);
        AmazonCloudWatchClient cclient = Mockito.mock(AmazonCloudWatchClient.class);
        Region region = RegionUtils.getRegion("us-west-2");

        AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
        KinesisClientLibConfiguration kclConfig =
                new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
                        .withRegionName("us-west-2");
        IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class);
        new Worker(processorFactory, kclConfig, kclient, dclient, cclient);

        Mockito.verify(kclient, Mockito.times(1)).setRegion(region);
        Mockito.verify(dclient, Mockito.times(1)).setRegion(region);
        Mockito.verify(cclient, Mockito.times(1)).setRegion(region);
    }

    @Test
    public void testKCLConfigurationWithBothRegionAndEndpointProvided() {
        // test if the setRegion method has been called once for each of the
        // clients and setEndpoint has been called once for the kinesis
        // client by setting the kinesis endpoint
        AmazonKinesisClient kclient = Mockito.mock(AmazonKinesisClient.class);
        AmazonDynamoDBClient dclient = Mockito.mock(AmazonDynamoDBClient.class);
        AmazonCloudWatchClient cclient = Mockito.mock(AmazonCloudWatchClient.class);
        Region region = RegionUtils.getRegion("us-west-2");

        AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
        KinesisClientLibConfiguration kclConfig =
                new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
                        .withRegionName("us-west-2")
                        .withKinesisEndpoint("https://kinesis.eu-west-1.amazonaws.com");
        IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class);
        new Worker(processorFactory, kclConfig, kclient, dclient, cclient);

        Mockito.verify(kclient, Mockito.times(1)).setRegion(region);
        Mockito.verify(dclient, Mockito.times(1)).setRegion(region);
        Mockito.verify(cclient, Mockito.times(1)).setRegion(region);
        Mockito.verify(kclient, Mockito.times(1)).setEndpoint("https://kinesis.eu-west-1.amazonaws.com");
    }

    @Test
    public void testKCLConfigurationWithSimplerWorkerConstructor() {
        // test the simpler worker constructor to see whether the region has been set,
        // by checking how many times getRegionName and getKinesisEndpoint have been called
        AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
        KinesisClientLibConfiguration kclConfig = Mockito.spy(
                new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
                        .withRegionName("us-west-2")
                        .withKinesisEndpoint("https://kinesis.eu-west-1.amazonaws.com"));

        IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class);
        new Worker(processorFactory, kclConfig);

        Mockito.verify(kclConfig, Mockito.times(9)).getRegionName();
        Mockito.verify(kclConfig, Mockito.times(4)).getKinesisEndpoint();

        kclConfig = Mockito.spy(
                new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0")
                        .withKinesisEndpoint("https://kinesis.eu-west-1.amazonaws.com"));

        new Worker(processorFactory, kclConfig);

        Mockito.verify(kclConfig, Mockito.times(3)).getRegionName();
        Mockito.verify(kclConfig, Mockito.times(3)).getKinesisEndpoint();
    }

    @Test
    public void testKCLConfigurationWithMultiRegionWithIlligalRegionName() {
        // test with illegal region name
        AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);

        KinesisClientLibConfiguration kclConfig =
                new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0");
        try {
            kclConfig = kclConfig.withRegionName("abcd");
            Assert.fail("No expected Exception is thrown.");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }

    @Test
    public void testKCLConfigurationWithMultiRegionWithIlligalRegionNameInFullConstructor() {
        // test with illegal region name
        Mockito.mock(AWSCredentialsProvider.class);
        try {
            new KinesisClientLibConfiguration(TEST_STRING,
                    TEST_STRING,
                    TEST_STRING,
                    null,
                    null,
                    null,
                    null,
                    TEST_VALUE_LONG,
                    TEST_STRING,
                    3,
                    TEST_VALUE_LONG,
                    false,
                    TEST_VALUE_LONG,
                    TEST_VALUE_LONG,
                    true,
                    new ClientConfiguration(),
                    new ClientConfiguration(),
                    new ClientConfiguration(),
                    TEST_VALUE_LONG,
                    TEST_VALUE_LONG,
                    1,
                    skipCheckpointValidationValue,
                    "abcd");
            Assert.fail("No expected Exception is thrown.");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }

    @Test
    public void testKCLConfigurationMetricsDefaults() {
        KinesisClientLibConfiguration config =
                new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker");
        // By default, metrics level should be detailed.
        assertEquals(config.getMetricsLevel(), MetricsLevel.DETAILED);
        // By default, only Operation and ShardId dimensions should be enabled.
        assertEquals(config.getMetricsEnabledDimensions(), ImmutableSet.of("Operation", "ShardId"));
    }

    @Test
    public void testKCLConfigurationWithMetricsLevel() {
        KinesisClientLibConfiguration config =
                new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker")
                        .withMetricsLevel("NONE");
        assertEquals(config.getMetricsLevel(), MetricsLevel.NONE);
    }

    @Test
    public void testKCLConfigurationWithMetricsEnabledDimensions() {
        KinesisClientLibConfiguration config =
                new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker")
                        .withMetricsEnabledDimensions(null);
        // Operation dimension should always be there.
        assertEquals(config.getMetricsEnabledDimensions(), ImmutableSet.of("Operation"));

        config.withMetricsEnabledDimensions(ImmutableSet.of("WorkerIdentifier"));
        // Operation dimension should always be there.
        assertEquals(config.getMetricsEnabledDimensions(), ImmutableSet.of("Operation", "WorkerIdentifier"));
    }
}
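The configuration tests above exercise the fluent setters (withRegionName, withKinesisEndpoint, withCommonClientConfig, withUserAgent, withMetricsLevel) and verify that the KCL user agent and region propagate to the Kinesis, DynamoDB, and CloudWatch clients. For orientation, a typical application-side configuration might look like the sketch below (application, stream, and worker names are placeholders; this block is illustrative and not part of the diff):

    // Illustrative configuration sketch; names are placeholders.
    KinesisClientLibConfiguration appConfig =
            new KinesisClientLibConfiguration("MyApp", "MyStream",
                    new DefaultAWSCredentialsProviderChain(), "worker-1")
                    .withRegionName("us-west-2")
                    .withMetricsLevel("NONE");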
@@ -0,0 +1,253 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.auth.SystemPropertiesCredentialsProvider;
import junit.framework.Assert;

import org.junit.Before;
import org.junit.Test;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
import com.amazonaws.services.kinesis.leases.impl.Lease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer;

import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.MatcherAssert.assertThat;

public class KinesisClientLibLeaseCoordinatorIntegrationTest {

    private static KinesisClientLeaseManager leaseManager;
    private KinesisClientLibLeaseCoordinator coordinator;
    private static final String TABLE_NAME = KinesisClientLibLeaseCoordinatorIntegrationTest.class.getSimpleName();
    private static final String WORKER_ID = UUID.randomUUID().toString();
    private final String leaseKey = "shd-1";

    @Before
    public void setUp() throws ProvisionedThroughputException, DependencyException, InvalidStateException {
        final boolean useConsistentReads = true;
        if (leaseManager == null) {
            AmazonDynamoDBClient ddb = new AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain());
            leaseManager =
                    new KinesisClientLeaseManager(TABLE_NAME, ddb, useConsistentReads);
        }
        leaseManager.createLeaseTableIfNotExists(10L, 10L);
        leaseManager.deleteAll();
        coordinator = new KinesisClientLibLeaseCoordinator(leaseManager, WORKER_ID, 5000L, 50L);
        coordinator.start();
    }

    /**
     * Tests update checkpoint success.
     */
    @Test
    public void testUpdateCheckpoint() throws LeasingException {
        TestHarnessBuilder builder = new TestHarnessBuilder();
        builder.withLease(leaseKey, null).build();

        // Run the taker and renewer in-between getting the Lease object and calling setCheckpoint
        coordinator.runLeaseTaker();
        coordinator.runLeaseRenewer();

        KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey);
        if (lease == null) {
            List<KinesisClientLease> leases = leaseManager.listLeases();
            for (KinesisClientLease kinesisClientLease : leases) {
                System.out.println(kinesisClientLease);
            }
        }

        assertThat(lease, notNullValue());
        ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint");
        // lease's leaseCounter is wrong at this point, but it shouldn't matter.
        Assert.assertTrue(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, lease.getConcurrencyToken()));

        Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());

        lease.setLeaseCounter(lease.getLeaseCounter() + 1);
        lease.setCheckpoint(newCheckpoint);
        lease.setLeaseOwner(coordinator.getWorkerIdentifier());
        Assert.assertEquals(lease, fromDynamo);
    }

    /**
     * Tests updateCheckpoint when the lease has changed out from under us.
     */
    @Test
    public void testUpdateCheckpointLeaseUpdated() throws LeasingException {
        TestHarnessBuilder builder = new TestHarnessBuilder();
        builder.withLease(leaseKey, null).build();

        coordinator.runLeaseTaker();
        coordinator.runLeaseRenewer();
        KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey);

        assertThat(lease, notNullValue());
        leaseManager.renewLease(coordinator.getCurrentlyHeldLease(leaseKey));

        ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint");
        Assert.assertFalse(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, lease.getConcurrencyToken()));

        Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());

        lease.setLeaseCounter(lease.getLeaseCounter() + 1);
        // Counter and owner changed, but checkpoint did not.
        lease.setLeaseOwner(coordinator.getWorkerIdentifier());
        Assert.assertEquals(lease, fromDynamo);
    }

    /**
     * Tests updateCheckpoint with a bad concurrency token.
     */
    @Test
    public void testUpdateCheckpointBadConcurrencyToken() throws LeasingException {
        TestHarnessBuilder builder = new TestHarnessBuilder();
        builder.withLease(leaseKey, null).build();

        coordinator.runLeaseTaker();
        coordinator.runLeaseRenewer();
        KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey);

        assertThat(lease, notNullValue());

        ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint");
        Assert.assertFalse(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, UUID.randomUUID()));

        Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());

        // Owner should be the only thing that changed.
        lease.setLeaseOwner(coordinator.getWorkerIdentifier());
        Assert.assertEquals(lease, fromDynamo);
    }

    public static class TestHarnessBuilder {

        private long currentTimeNanos;

        private Map<String, KinesisClientLease> leases = new HashMap<String, KinesisClientLease>();

        private Callable<Long> timeProvider = new Callable<Long>() {

            @Override
            public Long call() throws Exception {
                return currentTimeNanos;
            }

        };

        public TestHarnessBuilder withLease(String shardId) {
            return withLease(shardId, "leaseOwner");
        }

        public TestHarnessBuilder withLease(String shardId, String owner) {
            KinesisClientLease lease = new KinesisClientLease();
            lease.setCheckpoint(new ExtendedSequenceNumber("checkpoint"));
            lease.setOwnerSwitchesSinceCheckpoint(0L);
            lease.setLeaseCounter(0L);
            lease.setLeaseOwner(owner);
            lease.setParentShardIds(Collections.singleton("parentShardId"));
            lease.setLeaseKey(shardId);

            leases.put(shardId, lease);
            return this;
        }

        public Map<String, KinesisClientLease> build() throws LeasingException {
            for (KinesisClientLease lease : leases.values()) {
                leaseManager.createLeaseIfNotExists(lease);
                if (lease.getLeaseOwner() != null) {
                    lease.setLastCounterIncrementNanos(System.nanoTime());
                }
            }

            currentTimeNanos = System.nanoTime();

            return leases;
        }

        public void passTime(long millis) {
            currentTimeNanos += millis * 1000000;
        }

        private void mutateAssert(String newWorkerIdentifier, KinesisClientLease original, KinesisClientLease actual) {
            original.setLeaseCounter(original.getLeaseCounter() + 1);
            if (original.getLeaseOwner() != null && !newWorkerIdentifier.equals(original.getLeaseOwner())) {
                original.setOwnerSwitchesSinceCheckpoint(original.getOwnerSwitchesSinceCheckpoint() + 1);
            }
            original.setLeaseOwner(newWorkerIdentifier);

            Assert.assertEquals(original, actual); // Assert the contents of the lease
        }

        public void addLeasesToRenew(ILeaseRenewer<KinesisClientLease> renewer, String... shardIds)
                throws DependencyException, InvalidStateException {
            List<KinesisClientLease> leasesToRenew = new ArrayList<KinesisClientLease>();

            for (String shardId : shardIds) {
                KinesisClientLease lease = leases.get(shardId);
                Assert.assertNotNull(lease);
                leasesToRenew.add(lease);
            }

            renewer.addLeasesToRenew(leasesToRenew);
        }

        public Map<String, KinesisClientLease> renewMutateAssert(ILeaseRenewer<KinesisClientLease> renewer,
                String... renewedShardIds) throws DependencyException, InvalidStateException {
            renewer.renewLeases();

            Map<String, KinesisClientLease> heldLeases = renewer.getCurrentlyHeldLeases();
            Assert.assertEquals(renewedShardIds.length, heldLeases.size());

            for (String shardId : renewedShardIds) {
                KinesisClientLease original = leases.get(shardId);
                Assert.assertNotNull(original);

                KinesisClientLease actual = heldLeases.get(shardId);
                Assert.assertNotNull(actual);

                original.setLeaseCounter(original.getLeaseCounter() + 1);
                Assert.assertEquals(original, actual);
            }

            return heldLeases;
        }

        public void renewAllLeases() throws LeasingException {
            for (KinesisClientLease lease : leases.values()) {
                leaseManager.renewLease(lease);
            }
        }
    }

}
@@ -0,0 +1,75 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.doReturn;

import java.util.UUID;

import junit.framework.Assert;

import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;

public class KinesisClientLibLeaseCoordinatorTest {
    private static final String SHARD_ID = "shardId-test";
    private static final String WORK_ID = "workId-test";
    private static final long TEST_LONG = 1000L;
    private static final ExtendedSequenceNumber TEST_CHKPT = new ExtendedSequenceNumber("string-test");
    private static final UUID TEST_UUID = UUID.randomUUID();

    @SuppressWarnings("rawtypes")
    @Mock
    private ILeaseManager mockLeaseManager;

    private KinesisClientLibLeaseCoordinator leaseCoordinator;

    @SuppressWarnings("unchecked")
    @Before
    public void setUpLeaseCoordinator() throws ProvisionedThroughputException, DependencyException {
        // Initialize the annotation
        MockitoAnnotations.initMocks(this);
        // Set up lease coordinator
        doReturn(true).when(mockLeaseManager).createLeaseTableIfNotExists(anyLong(), anyLong());
        leaseCoordinator = new KinesisClientLibLeaseCoordinator(mockLeaseManager, WORK_ID, TEST_LONG, TEST_LONG);
    }

    @Test(expected = ShutdownException.class)
    public void testSetCheckpointWithUnownedShardId()
            throws KinesisClientLibException, DependencyException, InvalidStateException, ProvisionedThroughputException {
        final boolean succeess = leaseCoordinator.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID);
        Assert.assertFalse("Set Checkpoint should return failure", succeess);
        leaseCoordinator.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID.toString());
    }

    @Test(expected = DependencyException.class)
    public void testWaitLeaseTableTimeout()
            throws DependencyException, ProvisionedThroughputException, IllegalStateException {
        // Set mock lease manager to return false in waiting
        doReturn(false).when(mockLeaseManager).waitUntilLeaseTableExists(anyLong(), anyLong());
        leaseCoordinator.initialize();
    }
}
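The unit test above uses Mockito's annotation-driven mocks (@Mock plus MockitoAnnotations.initMocks). For readers less familiar with that style, the @Before setup is roughly equivalent to creating the mock explicitly, as in this sketch (illustrative only, using the raw ILeaseManager type just as the test does):

    // Explicit-mock equivalent of the annotation-based setup (illustrative only).
    ILeaseManager mockLeaseManager = Mockito.mock(ILeaseManager.class);
    Mockito.doReturn(true).when(mockLeaseManager).createLeaseTableIfNotExists(Mockito.anyLong(), Mockito.anyLong());
    KinesisClientLibLeaseCoordinator leaseCoordinator =
            new KinesisClientLibLeaseCoordinator(mockLeaseManager, "workId-test", 1000L, 1000L);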
@@ -0,0 +1,188 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;

import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
import com.amazonaws.services.kinesis.model.ShardIteratorType;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;

/**
 * Unit tests for KinesisDataFetcher.
 */
public class KinesisDataFetcherTest {

    private static final int MAX_RECORDS = 1;
    private static final String SHARD_ID = "shardId-1";
    private static final String AFTER_SEQUENCE_NUMBER = ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString();
    private static final String AT_SEQUENCE_NUMBER = ShardIteratorType.AT_SEQUENCE_NUMBER.toString();
    private static final ShardInfo SHARD_INFO = new ShardInfo(SHARD_ID, null, null);

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        MetricsHelper.startScope(new NullMetricsFactory(), "KinesisDataFetcherTest");
    }

    /**
     * Test initialize() with the LATEST iterator instruction
     */
    @Test
    public final void testInitializeLatest() throws Exception {
        testInitializeAndFetch(ShardIteratorType.LATEST.toString(), ShardIteratorType.LATEST.toString());
    }

    /**
     * Test initialize() with the TIME_ZERO iterator instruction
     */
    @Test
    public final void testInitializeTimeZero() throws Exception {
        testInitializeAndFetch(ShardIteratorType.TRIM_HORIZON.toString(), ShardIteratorType.TRIM_HORIZON.toString());
    }

    /**
     * Test initialize() when a flushpoint exists.
     */
    @Test
    public final void testInitializeFlushpoint() throws Exception {
        testInitializeAndFetch("foo", "123");
    }

    /**
     * Test initialize() with an invalid iterator instruction
     */
    @Test(expected = IllegalArgumentException.class)
    public final void testInitializeInvalid() throws Exception {
        testInitializeAndFetch("foo", null);
    }

    @Test
    public void testadvanceIteratorTo() throws KinesisClientLibException {
        IKinesisProxy kinesis = mock(IKinesisProxy.class);
        ICheckpoint checkpoint = mock(ICheckpoint.class);

        KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO);

        String iteratorA = "foo";
        String iteratorB = "bar";
        String seqA = "123";
        String seqB = "456";
        GetRecordsResult outputA = new GetRecordsResult();
        List<Record> recordsA = new ArrayList<Record>();
        outputA.setRecords(recordsA);
        GetRecordsResult outputB = new GetRecordsResult();
        List<Record> recordsB = new ArrayList<Record>();
        outputB.setRecords(recordsB);

        when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqA)).thenReturn(iteratorA);
        when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqB)).thenReturn(iteratorB);
        when(kinesis.get(iteratorA, MAX_RECORDS)).thenReturn(outputA);
        when(kinesis.get(iteratorB, MAX_RECORDS)).thenReturn(outputB);

        when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqA));
        fetcher.initialize(seqA);

        fetcher.advanceIteratorTo(seqA);
        Assert.assertEquals(recordsA, fetcher.getRecords(MAX_RECORDS).getRecords());

        fetcher.advanceIteratorTo(seqB);
        Assert.assertEquals(recordsB, fetcher.getRecords(MAX_RECORDS).getRecords());
    }

    @Test
    public void testadvanceIteratorToTrimHorizonAndLatest() {
        IKinesisProxy kinesis = mock(IKinesisProxy.class);

        KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO);

        String iteratorHorizon = "horizon";
        when(kinesis.getIterator(SHARD_ID,
                ShardIteratorType.TRIM_HORIZON.toString(), null)).thenReturn(iteratorHorizon);
        fetcher.advanceIteratorTo(ShardIteratorType.TRIM_HORIZON.toString());
        Assert.assertEquals(iteratorHorizon, fetcher.getNextIterator());

        String iteratorLatest = "latest";
        when(kinesis.getIterator(SHARD_ID, ShardIteratorType.LATEST.toString(), null)).thenReturn(iteratorLatest);
        fetcher.advanceIteratorTo(ShardIteratorType.LATEST.toString());
        Assert.assertEquals(iteratorLatest, fetcher.getNextIterator());
    }

    @Test
    public void testGetRecordsWithResourceNotFoundException() {
        // Set up arguments used by proxy
        String nextIterator = "TestShardIterator";
        int maxRecords = 100;

        // Set up proxy mock methods
        KinesisProxy mockProxy = mock(KinesisProxy.class);
        doReturn(nextIterator).when(mockProxy).getIterator(SHARD_ID, ShardIteratorType.LATEST.toString(), null);
        doThrow(new ResourceNotFoundException("Test Exception")).when(mockProxy).get(nextIterator, maxRecords);

        // Create data fetcher and initialize it with a LATEST-type checkpoint
        KinesisDataFetcher dataFetcher = new KinesisDataFetcher(mockProxy, SHARD_INFO);
        dataFetcher.initialize(SentinelCheckpoint.LATEST.toString());
        // Call getRecords of dataFetcher which will throw an exception
        dataFetcher.getRecords(maxRecords);

        // Test shard has reached the end
        Assert.assertTrue("Shard should reach the end", dataFetcher.isShardEndReached());
    }

    private void testInitializeAndFetch(String iteratorType, String seqNo) throws Exception {
        IKinesisProxy kinesis = mock(IKinesisProxy.class);
        String iterator = "foo";
        List<Record> expectedRecords = new ArrayList<Record>();
        GetRecordsResult response = new GetRecordsResult();
        response.setRecords(expectedRecords);

        when(kinesis.getIterator(SHARD_ID, iteratorType, null)).thenReturn(iterator);
        when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqNo)).thenReturn(iterator);
        when(kinesis.get(iterator, MAX_RECORDS)).thenReturn(response);

        ICheckpoint checkpoint = mock(ICheckpoint.class);
        when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqNo));

        KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO);

        fetcher.initialize(seqNo);
        List<Record> actualRecords = fetcher.getRecords(MAX_RECORDS).getRecords();

        Assert.assertEquals(expectedRecords, actualRecords);
    }

}
@@ -0,0 +1,366 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.clientlibrary.types.Messages.AggregatedRecord;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
import com.amazonaws.services.kinesis.model.Record;
import com.google.protobuf.ByteString;

public class ProcessTaskTest {

    @SuppressWarnings("serial")
    private static class RecordSubclass extends Record {}

    private static final byte[] TEST_DATA = new byte[] { 1, 2, 3, 4 };

    private final int maxRecords = 100;
    private final String shardId = "shard-test";
    private final long idleTimeMillis = 1000L;
    private final long taskBackoffTimeMillis = 1L;
    private final boolean callProcessRecordsForEmptyRecordList = true;
    // We don't want any of these tests to run checkpoint validation
    private final boolean skipCheckpointValidationValue = false;
    private final InitialPositionInStream initialPositionInStream = InitialPositionInStream.LATEST;

    private @Mock KinesisDataFetcher mockDataFetcher;
    private @Mock IRecordProcessor mockRecordProcessor;
    private @Mock RecordProcessorCheckpointer mockCheckpointer;

    private List<Record> processedRecords;
    private ExtendedSequenceNumber newLargestPermittedCheckpointValue;

    private ProcessTask processTask;

    @Before
    public void setUpProcessTask() {
        // Initialize the annotation
        MockitoAnnotations.initMocks(this);
        // Set up process task
        final StreamConfig config =
                new StreamConfig(null, maxRecords, idleTimeMillis, callProcessRecordsForEmptyRecordList,
                        skipCheckpointValidationValue, initialPositionInStream);
        final ShardInfo shardInfo = new ShardInfo(shardId, null, null);
        processTask = new ProcessTask(
                shardInfo, config, mockRecordProcessor, mockCheckpointer, mockDataFetcher, taskBackoffTimeMillis);
    }

    @Test
    public void testProcessTaskWithProvisionedThroughputExceededException() {
        // Set data fetcher to throw exception
        doReturn(false).when(mockDataFetcher).isShardEndReached();
        doThrow(new ProvisionedThroughputExceededException("Test Exception")).when(mockDataFetcher)
                .getRecords(maxRecords);

        TaskResult result = processTask.call();
        assertTrue("Result should contain ProvisionedThroughputExceededException",
                result.getException() instanceof ProvisionedThroughputExceededException);
    }

    @Test
    public void testProcessTaskWithNonExistentStream() {
        // Data fetcher returns a null Result when the stream does not exist
        doReturn(null).when(mockDataFetcher).getRecords(maxRecords);

        TaskResult result = processTask.call();
        assertNull("Task should not throw an exception", result.getException());
    }

    @Test
    public void testProcessTaskWithShardEndReached() {
        // Set data fetcher to return true for shard end reached
        doReturn(true).when(mockDataFetcher).isShardEndReached();

        TaskResult result = processTask.call();
        assertTrue("Result should contain shardEndReached true", result.isShardEndReached());
    }

    @Test
    public void testNonAggregatedKinesisRecord() {
        final String sqn = new BigInteger(128, new Random()).toString();
        final String pk = UUID.randomUUID().toString();
        final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS));
        final Record r = new Record()
                .withPartitionKey(pk)
                .withData(ByteBuffer.wrap(TEST_DATA))
                .withSequenceNumber(sqn)
                .withApproximateArrivalTimestamp(ts);

        testWithRecord(r);

        assertEquals(1, processedRecords.size());

        Record pr = processedRecords.get(0);
        assertEquals(pk, pr.getPartitionKey());
        assertEquals(ts, pr.getApproximateArrivalTimestamp());
        byte[] b = new byte[pr.getData().remaining()];
        pr.getData().get(b);
        assertTrue(Arrays.equals(TEST_DATA, b));

        assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber());
        assertEquals(0, newLargestPermittedCheckpointValue.getSubSequenceNumber());
    }

    @Test
    public void testDoesNotDeaggregateSubclassOfRecord() {
        final String sqn = new BigInteger(128, new Random()).toString();
        final Record r = new RecordSubclass()
                .withSequenceNumber(sqn)
                .withData(ByteBuffer.wrap(new byte[0]));

        testWithRecord(r);

        assertEquals(1, processedRecords.size(), 1);
        assertSame(r, processedRecords.get(0));

        assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber());
        assertEquals(0, newLargestPermittedCheckpointValue.getSubSequenceNumber());
    }

    @Test
    public void testDeaggregatesRecord() {
        final String sqn = new BigInteger(128, new Random()).toString();
        final String pk = UUID.randomUUID().toString();
        final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS));
        final Record r = new Record()
                .withPartitionKey("-")
                .withData(generateAggregatedRecord(pk))
                .withSequenceNumber(sqn)
                .withApproximateArrivalTimestamp(ts);

        testWithRecord(r);

        assertEquals(3, processedRecords.size());
        for (Record pr : processedRecords) {
            assertTrue(pr instanceof UserRecord);
            assertEquals(pk, pr.getPartitionKey());
            assertEquals(ts, pr.getApproximateArrivalTimestamp());
            byte[] b = new byte[pr.getData().remaining()];
            pr.getData().get(b);
            assertTrue(Arrays.equals(TEST_DATA, b));
        }

        assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber());
        assertEquals(processedRecords.size() - 1, newLargestPermittedCheckpointValue.getSubSequenceNumber());
    }

    @Test
    public void testDeaggregatesRecordWithNoArrivalTimestamp() {
        final String sqn = new BigInteger(128, new Random()).toString();
        final String pk = UUID.randomUUID().toString();
        final Record r = new Record()
                .withPartitionKey("-")
                .withData(generateAggregatedRecord(pk))
                .withSequenceNumber(sqn);

        testWithRecord(r);

        assertEquals(3, processedRecords.size());
        for (Record pr : processedRecords) {
            assertTrue(pr instanceof UserRecord);
            assertEquals(pk, pr.getPartitionKey());
            assertNull(pr.getApproximateArrivalTimestamp());
        }
    }

    @Test
    public void testLargestPermittedCheckpointValue() {
        // Some sequence number value from previous processRecords call to mock.
        final BigInteger previousCheckpointSqn = new BigInteger(128, new Random());

        // Values for this processRecords call.
        final int numberOfRecords = 104;
        // Start this batch of records at a sequence number greater than the previous checkpoint value.
        final BigInteger startingSqn = previousCheckpointSqn.add(BigInteger.valueOf(10));
        final List<Record> records = generateConsecutiveRecords(
                numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), new Date(), startingSqn);

        testWithRecords(records, new ExtendedSequenceNumber(previousCheckpointSqn.toString()),
                new ExtendedSequenceNumber(previousCheckpointSqn.toString()));

        final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber(
                startingSqn.add(BigInteger.valueOf(numberOfRecords - 1)).toString());
        assertEquals(expectedLargestPermittedEsqn, newLargestPermittedCheckpointValue);
    }

    @Test
    public void testLargestPermittedCheckpointValueWithEmptyRecords() {
        // Some sequence number value from previous processRecords call.
        final BigInteger baseSqn = new BigInteger(128, new Random());
        final ExtendedSequenceNumber lastCheckpointEspn = new ExtendedSequenceNumber(baseSqn.toString());
        final ExtendedSequenceNumber largestPermittedEsqn = new ExtendedSequenceNumber(
                baseSqn.add(BigInteger.valueOf(100)).toString());

        testWithRecords(Collections.<Record>emptyList(), lastCheckpointEspn, largestPermittedEsqn);

        // Make sure that even with empty records, largest permitted sequence number does not change.
        assertEquals(largestPermittedEsqn, newLargestPermittedCheckpointValue);
    }

    @Test
    public void testFilterBasedOnLastCheckpointValue() {
        // Explanation of setup:
        // * Assume in the previous processRecords call, the user got 3 sub-records that all belonged to one
        //   Kinesis record. So the sequence number was X, and the sub-sequence numbers were 0, 1, 2.
        // * The 2nd sub-record was checkpointed (extended sequence number X.1).
        // * The worker crashed and restarted. So now DDB has a checkpoint value of X.1.
        // Test:
        // * Now in the subsequent processRecords call, KCL should filter out X.0 and X.1.
        final BigInteger previousCheckpointSqn = new BigInteger(128, new Random());
        final long previousCheckpointSsqn = 1;

        // Values for this processRecords call.
        final String startingSqn = previousCheckpointSqn.toString();
        final String pk = UUID.randomUUID().toString();
        final Record r = new Record()
                .withPartitionKey("-")
                .withData(generateAggregatedRecord(pk))
                .withSequenceNumber(startingSqn);

        testWithRecords(Collections.singletonList(r),
                new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn),
                new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn));

        // First two records should be dropped - and only 1 remaining record should be there.
        assertEquals(1, processedRecords.size());
        assertTrue(processedRecords.get(0) instanceof UserRecord);

        // Verify user record's extended sequence number and other fields.
        final UserRecord pr = (UserRecord) processedRecords.get(0);
        assertEquals(pk, pr.getPartitionKey());
        assertEquals(startingSqn, pr.getSequenceNumber());
        assertEquals(previousCheckpointSsqn + 1, pr.getSubSequenceNumber());
        assertNull(pr.getApproximateArrivalTimestamp());

        // Expected largest permitted sequence number will be last sub-record sequence number.
        final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber(
                previousCheckpointSqn.toString(), 2L);
        assertEquals(expectedLargestPermittedEsqn, newLargestPermittedCheckpointValue);
    }

    private void testWithRecord(Record record) {
        testWithRecords(Collections.singletonList(record),
                ExtendedSequenceNumber.TRIM_HORIZON, ExtendedSequenceNumber.TRIM_HORIZON);
    }

    private void testWithRecords(List<Record> records,
            ExtendedSequenceNumber lastCheckpointValue,
            ExtendedSequenceNumber largestPermittedCheckpointValue) {
        when(mockDataFetcher.getRecords(anyInt())).thenReturn(
                new GetRecordsResult().withRecords(records));
        when(mockCheckpointer.getLastCheckpointValue()).thenReturn(lastCheckpointValue);
        when(mockCheckpointer.getLargestPermittedCheckpointValue()).thenReturn(largestPermittedCheckpointValue);
        processTask.call();

        ArgumentCaptor<ProcessRecordsInput> priCaptor = ArgumentCaptor.forClass(ProcessRecordsInput.class);
        verify(mockRecordProcessor).processRecords(priCaptor.capture());
        processedRecords = priCaptor.getValue().getRecords();

        ArgumentCaptor<ExtendedSequenceNumber> esnCaptor = ArgumentCaptor.forClass(ExtendedSequenceNumber.class);
        verify(mockCheckpointer).setLargestPermittedCheckpointValue(esnCaptor.capture());
        newLargestPermittedCheckpointValue = esnCaptor.getValue();
    }

    /**
     * See the KPL documentation on GitHub for more details about the binary
     * format.
     *
     * @param pk
     *            Partition key to use. All the records will have the same
|
||||||
|
* partition key.
|
||||||
|
* @return ByteBuffer containing the serialized form of the aggregated
|
||||||
|
* record, along with the necessary header and footer.
|
||||||
|
*/
|
||||||
|
private static ByteBuffer generateAggregatedRecord(String pk) {
|
||||||
|
ByteBuffer bb = ByteBuffer.allocate(1024);
|
||||||
|
bb.put(new byte[] {-13, -119, -102, -62 });
|
||||||
|
|
||||||
|
com.amazonaws.services.kinesis.clientlibrary.types.Messages.Record r =
|
||||||
|
com.amazonaws.services.kinesis.clientlibrary.types.Messages.Record.newBuilder()
|
||||||
|
.setData(ByteString.copyFrom(TEST_DATA))
|
||||||
|
.setPartitionKeyIndex(0)
|
||||||
|
.build();
|
||||||
|
|
||||||
|
byte[] payload = AggregatedRecord.newBuilder()
|
||||||
|
.addPartitionKeyTable(pk)
|
||||||
|
.addRecords(r)
|
||||||
|
.addRecords(r)
|
||||||
|
.addRecords(r)
|
||||||
|
.build()
|
||||||
|
.toByteArray();
|
||||||
|
|
||||||
|
bb.put(payload);
|
||||||
|
bb.put(md5(payload));
|
||||||
|
bb.limit(bb.position());
|
||||||
|
bb.rewind();
|
||||||
|
return bb;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static List<Record> generateConsecutiveRecords(
|
||||||
|
int numberOfRecords, String partitionKey, ByteBuffer data,
|
||||||
|
Date arrivalTimestamp, BigInteger startSequenceNumber) {
|
||||||
|
List<Record> records = new ArrayList<>();
|
||||||
|
for (int i = 0 ; i < numberOfRecords ; ++i) {
|
||||||
|
records.add(new Record()
|
||||||
|
.withPartitionKey(partitionKey)
|
||||||
|
.withData(data)
|
||||||
|
.withSequenceNumber(startSequenceNumber.add(BigInteger.valueOf(i)).toString())
|
||||||
|
.withApproximateArrivalTimestamp(arrivalTimestamp));
|
||||||
|
}
|
||||||
|
return records;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static byte[] md5(byte[] b) {
|
||||||
|
try {
|
||||||
|
MessageDigest md = MessageDigest.getInstance("MD5");
|
||||||
|
return md.digest(b);
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new RuntimeException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
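For reference, the aggregated record that generateAggregatedRecord builds above follows the KPL aggregation framing: a four-byte magic prefix, a protobuf-encoded AggregatedRecord payload, and a trailing 16-byte MD5 digest of that payload. Below is a minimal consumer-side sketch of that framing check using only the JDK; the class and method names are hypothetical, and this illustrates the format the test writes, not the KCL's own deaggregation code.

import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.util.Arrays;

final class AggregatedRecordFramingSketch {
    // Same magic prefix the test helper writes ahead of the protobuf payload.
    private static final byte[] MAGIC = new byte[] {-13, -119, -102, -62 };
    private static final int MD5_LENGTH = 16;

    /**
     * Returns the protobuf payload if the buffer is framed like an aggregated record
     * (magic prefix present and trailing MD5 matches), or null otherwise.
     */
    static byte[] extractPayload(ByteBuffer data) throws Exception {
        byte[] bytes = new byte[data.remaining()];
        data.duplicate().get(bytes);
        if (bytes.length <= MAGIC.length + MD5_LENGTH) {
            return null;
        }
        if (!Arrays.equals(Arrays.copyOfRange(bytes, 0, MAGIC.length), MAGIC)) {
            return null;
        }
        byte[] payload = Arrays.copyOfRange(bytes, MAGIC.length, bytes.length - MD5_LENGTH);
        byte[] trailer = Arrays.copyOfRange(bytes, bytes.length - MD5_LENGTH, bytes.length);
        byte[] expected = MessageDigest.getInstance("MD5").digest(payload);
        return Arrays.equals(trailer, expected) ? payload : null;
    }
}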
@@ -0,0 +1,418 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map.Entry;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.InMemoryCheckpointImpl;
import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
import com.amazonaws.services.kinesis.model.Record;

import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Matchers.anyString;

/**
 * Unit tests of {@link RecordProcessorCheckpointer}.
 */
public class RecordProcessorCheckpointerTest {
    private String startingSequenceNumber = "13";
    private ExtendedSequenceNumber startingExtendedSequenceNumber = new ExtendedSequenceNumber(startingSequenceNumber);
    private String testConcurrencyToken = "testToken";
    private ICheckpoint checkpoint;
    private String shardId = "shardId-123";

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
        checkpoint = new InMemoryCheckpointImpl(startingSequenceNumber);
        // A real checkpoint will return a checkpoint value after it is initialized.
        checkpoint.setCheckpoint(shardId, startingExtendedSequenceNumber, testConcurrencyToken);
        Assert.assertEquals(this.startingExtendedSequenceNumber, checkpoint.getCheckpoint(shardId));
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
    }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for
|
||||||
|
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testCheckpoint() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
|
||||||
|
// First call to checkpoint
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, null);
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(startingExtendedSequenceNumber);
|
||||||
|
processingCheckpointer.checkpoint();
|
||||||
|
Assert.assertEquals(startingExtendedSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
|
||||||
|
// Advance checkpoint
|
||||||
|
ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019");
|
||||||
|
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(sequenceNumber);
|
||||||
|
processingCheckpointer.checkpoint();
|
||||||
|
Assert.assertEquals(sequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for
|
||||||
|
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(Record record)}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testCheckpointRecord() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
SequenceNumberValidator sequenceNumberValidator =
|
||||||
|
new SequenceNumberValidator(null, shardId, false);
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
|
||||||
|
processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
|
||||||
|
ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025");
|
||||||
|
Record record = new Record().withSequenceNumber("5025");
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
|
||||||
|
processingCheckpointer.checkpoint(record);
|
||||||
|
Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for
|
||||||
|
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(UserRecord record)}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testCheckpointSubRecord() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
SequenceNumberValidator sequenceNumberValidator =
|
||||||
|
new SequenceNumberValidator(null, shardId, false);
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
|
||||||
|
processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
|
||||||
|
ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030");
|
||||||
|
Record record = new Record().withSequenceNumber("5030");
|
||||||
|
UserRecord subRecord = new UserRecord(record);
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
|
||||||
|
processingCheckpointer.checkpoint(subRecord);
|
||||||
|
Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for
|
||||||
|
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(String sequenceNumber)}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testCheckpointSequenceNumber() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
SequenceNumberValidator sequenceNumberValidator =
|
||||||
|
new SequenceNumberValidator(null, shardId, false);
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
|
||||||
|
processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
|
||||||
|
ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035");
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
|
||||||
|
processingCheckpointer.checkpoint("5035");
|
||||||
|
Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for
|
||||||
|
* {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testCheckpointExtendedSequenceNumber() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
SequenceNumberValidator sequenceNumberValidator =
|
||||||
|
new SequenceNumberValidator(null, shardId, false);
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator);
|
||||||
|
processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
|
||||||
|
ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040");
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber);
|
||||||
|
processingCheckpointer.checkpoint("5040", 0);
|
||||||
|
Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for update()
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testUpdate() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
|
||||||
|
RecordProcessorCheckpointer checkpointer = new RecordProcessorCheckpointer(shardInfo, checkpoint, null);
|
||||||
|
|
||||||
|
ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("10");
|
||||||
|
checkpointer.setLargestPermittedCheckpointValue(sequenceNumber);
|
||||||
|
Assert.assertEquals(sequenceNumber, checkpointer.getLargestPermittedCheckpointValue());
|
||||||
|
|
||||||
|
sequenceNumber = new ExtendedSequenceNumber("90259185948592875928375908214918273491783097");
|
||||||
|
checkpointer.setLargestPermittedCheckpointValue(sequenceNumber);
|
||||||
|
Assert.assertEquals(sequenceNumber, checkpointer.getLargestPermittedCheckpointValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This test is a mixed test of checking some basic functionality of checkpointing at a sequence number and making
|
||||||
|
* sure certain bounds checks and validations are being performed inside the checkpointer to prevent clients from
|
||||||
|
* checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be checkpointing
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testClientSpecifiedCheckpoint() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
|
||||||
|
SequenceNumberValidator validator = mock(SequenceNumberValidator.class);
|
||||||
|
Mockito.doNothing().when(validator).validateSequenceNumber(anyString());
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, validator);
|
||||||
|
|
||||||
|
// Several checkpoints we're gonna hit
|
||||||
|
ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2");
|
||||||
|
ExtendedSequenceNumber firstSequenceNumber = checkpoint.getCheckpoint(shardId); // 13
|
||||||
|
ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("127");
|
||||||
|
ExtendedSequenceNumber thirdSequenceNumber = new ExtendedSequenceNumber("5019");
|
||||||
|
ExtendedSequenceNumber lastSequenceNumberOfShard = new ExtendedSequenceNumber("6789");
|
||||||
|
ExtendedSequenceNumber tooBigSequenceNumber = new ExtendedSequenceNumber("9000");
|
||||||
|
|
||||||
|
processingCheckpointer.setInitialCheckpointValue(firstSequenceNumber);
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(thirdSequenceNumber);
|
||||||
|
|
||||||
|
// confirm that we cannot move backward
|
||||||
|
try {
|
||||||
|
processingCheckpointer.checkpoint(tooSmall.getSequenceNumber(), tooSmall.getSubSequenceNumber());
|
||||||
|
Assert.fail("You shouldn't be able to checkpoint earlier than the initial checkpoint.");
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
// yay!
|
||||||
|
}
|
||||||
|
|
||||||
|
// advance to first
|
||||||
|
processingCheckpointer.checkpoint(firstSequenceNumber.getSequenceNumber(), firstSequenceNumber.getSubSequenceNumber());
|
||||||
|
Assert.assertEquals(firstSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
processingCheckpointer.checkpoint(firstSequenceNumber.getSequenceNumber(), firstSequenceNumber.getSubSequenceNumber());
|
||||||
|
Assert.assertEquals(firstSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
|
||||||
|
// advance to second
|
||||||
|
processingCheckpointer.checkpoint(secondSequenceNumber.getSequenceNumber(), secondSequenceNumber.getSubSequenceNumber());
|
||||||
|
Assert.assertEquals(secondSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
|
||||||
|
ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt =
|
||||||
|
{ tooSmall, // Shouldn't be able to move before the first value we ever checkpointed
|
||||||
|
firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number
|
||||||
|
tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer
|
||||||
|
lastSequenceNumberOfShard, // Just another big value that we will use later
|
||||||
|
null, // Not a valid sequence number
|
||||||
|
new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string
|
||||||
|
ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max
|
||||||
|
ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value
|
||||||
|
ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value
|
||||||
|
};
|
||||||
|
for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) {
|
||||||
|
try {
|
||||||
|
processingCheckpointer.checkpoint(badCheckpointValue.getSequenceNumber(), badCheckpointValue.getSubSequenceNumber());
|
||||||
|
fail("checkpointing at bad or out of order sequence didn't throw exception");
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
|
||||||
|
} catch (NullPointerException e) {
|
||||||
|
|
||||||
|
}
|
||||||
|
Assert.assertEquals("Checkpoint value should not have changed",
|
||||||
|
secondSequenceNumber,
|
||||||
|
checkpoint.getCheckpoint(shardId));
|
||||||
|
Assert.assertEquals("Last checkpoint value should not have changed",
|
||||||
|
secondSequenceNumber,
|
||||||
|
processingCheckpointer.getLastCheckpointValue());
|
||||||
|
Assert.assertEquals("Largest sequence number should not have changed",
|
||||||
|
thirdSequenceNumber,
|
||||||
|
processingCheckpointer.getLargestPermittedCheckpointValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
// advance to third number
|
||||||
|
processingCheckpointer.checkpoint(thirdSequenceNumber.getSequenceNumber(), thirdSequenceNumber.getSubSequenceNumber());
|
||||||
|
Assert.assertEquals(thirdSequenceNumber, checkpoint.getCheckpoint(shardId));
|
||||||
|
|
||||||
|
// Testing a feature that prevents checkpointing at SHARD_END twice
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(lastSequenceNumberOfShard);
|
||||||
|
processingCheckpointer.setSequenceNumberAtShardEnd(processingCheckpointer.getLargestPermittedCheckpointValue());
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END);
|
||||||
|
processingCheckpointer.checkpoint(lastSequenceNumberOfShard.getSequenceNumber(), lastSequenceNumberOfShard.getSubSequenceNumber());
|
||||||
|
Assert.assertEquals("Checkpoing at the sequence number at the end of a shard should be the same as "
|
||||||
|
+ "checkpointing at SHARD_END",
|
||||||
|
ExtendedSequenceNumber.SHARD_END,
|
||||||
|
processingCheckpointer.getLastCheckpointValue());
|
||||||
|
}
|
||||||
|
|
||||||
|
private enum CheckpointAction {
|
||||||
|
NONE, NO_SEQUENCE_NUMBER, WITH_SEQUENCE_NUMBER;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests a bunch of mixed calls between checkpoint() and checkpoint(sequenceNumber) using a helper function.
|
||||||
|
*
|
||||||
|
* Also covers an edge case scenario where a shard consumer is started on a shard that never receives any records
|
||||||
|
* and is then shut down
|
||||||
|
*
|
||||||
|
* @throws Exception
|
||||||
|
*/
|
||||||
|
@SuppressWarnings("serial")
|
||||||
|
@Test
|
||||||
|
public final void testMixedCheckpointCalls() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null);
|
||||||
|
|
||||||
|
SequenceNumberValidator validator = mock(SequenceNumberValidator.class);
|
||||||
|
Mockito.doNothing().when(validator).validateSequenceNumber(anyString());
|
||||||
|
|
||||||
|
RecordProcessorCheckpointer processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, validator);
|
||||||
|
|
||||||
|
List<LinkedHashMap<String, CheckpointAction>> testPlans =
|
||||||
|
new ArrayList<LinkedHashMap<String, CheckpointAction>>();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Simulate a scenario where the checkpointer is created at "latest".
|
||||||
|
*
|
||||||
|
* Then the processor is called with no records (e.g. no more records are added, but the processor might be
|
||||||
|
* called just to allow checkpointing).
|
||||||
|
*
|
||||||
|
* Then the processor is shutdown.
|
||||||
|
*/
|
||||||
|
testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
|
||||||
|
{
|
||||||
|
put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
// Nearly the same as the previous test, but we don't call checkpoint after LATEST
|
||||||
|
testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
|
||||||
|
{
|
||||||
|
put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NONE);
|
||||||
|
put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Start with TRIM_HORIZON
|
||||||
|
testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
|
||||||
|
{
|
||||||
|
put(SentinelCheckpoint.TRIM_HORIZON.toString(), CheckpointAction.NONE);
|
||||||
|
put("1", CheckpointAction.NONE);
|
||||||
|
put("2", CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put("3", CheckpointAction.NONE);
|
||||||
|
put("4", CheckpointAction.WITH_SEQUENCE_NUMBER);
|
||||||
|
put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// Start with LATEST and a bit more complexity
|
||||||
|
testPlans.add(new LinkedHashMap<String, CheckpointAction>() {
|
||||||
|
{
|
||||||
|
put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put("30", CheckpointAction.NONE);
|
||||||
|
put("332", CheckpointAction.WITH_SEQUENCE_NUMBER);
|
||||||
|
put("349", CheckpointAction.NONE);
|
||||||
|
put("4332", CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put("4338", CheckpointAction.NONE);
|
||||||
|
put("5349", CheckpointAction.WITH_SEQUENCE_NUMBER);
|
||||||
|
put("5358", CheckpointAction.NONE);
|
||||||
|
put("64332", CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put("64338", CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put("65358", CheckpointAction.WITH_SEQUENCE_NUMBER);
|
||||||
|
put("764338", CheckpointAction.WITH_SEQUENCE_NUMBER);
|
||||||
|
put("765349", CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
put("765358", CheckpointAction.NONE);
|
||||||
|
put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
for (LinkedHashMap<String, CheckpointAction> testPlan : testPlans) {
|
||||||
|
processingCheckpointer =
|
||||||
|
new RecordProcessorCheckpointer(shardInfo, checkpoint, validator);
|
||||||
|
testMixedCheckpointCalls(processingCheckpointer, testPlan);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A utility function to simplify various sequences of intermixed updates to the checkpointer, and calls to
|
||||||
|
* checkpoint() and checkpoint(sequenceNumber). Takes a map where the key is a new sequence number to set in the
|
||||||
|
* checkpointer and the value is a CheckpointAction indicating an action to take: NONE -> Set the sequence number,
|
||||||
|
* don't do anything else NO_SEQUENCE_NUMBER -> Set the sequence number and call checkpoint() WITH_SEQUENCE_NUMBER
|
||||||
|
* -> Set the sequence number and call checkpoint(sequenceNumber) with that sequence number
|
||||||
|
*
|
||||||
|
* @param processingCheckpointer
|
||||||
|
* @param checkpointValueAndAction
|
||||||
|
* A map describing which checkpoint value to set in the checkpointer, and what action to take
|
||||||
|
* @throws Exception
|
||||||
|
*/
|
||||||
|
private void testMixedCheckpointCalls(RecordProcessorCheckpointer processingCheckpointer,
|
||||||
|
LinkedHashMap<String, CheckpointAction> checkpointValueAndAction) throws Exception {
|
||||||
|
|
||||||
|
for (Entry<String, CheckpointAction> entry : checkpointValueAndAction.entrySet()) {
|
||||||
|
ExtendedSequenceNumber lastCheckpointValue = processingCheckpointer.getLastCheckpointValue();
|
||||||
|
|
||||||
|
if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) {
|
||||||
|
// Before shard end, we will pretend to do what we expect the shutdown task to do
|
||||||
|
processingCheckpointer.setSequenceNumberAtShardEnd(processingCheckpointer
|
||||||
|
.getLargestPermittedCheckpointValue());
|
||||||
|
}
|
||||||
|
// Advance the largest checkpoint and check that it is updated.
|
||||||
|
processingCheckpointer.setLargestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey()));
|
||||||
|
Assert.assertEquals("Expected the largest checkpoint value to be updated after setting it",
|
||||||
|
new ExtendedSequenceNumber(entry.getKey()),
|
||||||
|
processingCheckpointer.getLargestPermittedCheckpointValue());
|
||||||
|
switch (entry.getValue()) {
|
||||||
|
case NONE:
|
||||||
|
// We were told not to checkpoint, so let's just make sure the last checkpoint value is the same as
|
||||||
|
// when this block started then continue to the next instruction
|
||||||
|
Assert.assertEquals("Expected the last checkpoint value to stay the same if we didn't checkpoint",
|
||||||
|
lastCheckpointValue,
|
||||||
|
processingCheckpointer.getLastCheckpointValue());
|
||||||
|
continue;
|
||||||
|
case NO_SEQUENCE_NUMBER:
|
||||||
|
processingCheckpointer.checkpoint();
|
||||||
|
break;
|
||||||
|
case WITH_SEQUENCE_NUMBER:
|
||||||
|
processingCheckpointer.checkpoint(entry.getKey());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
// We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date
|
||||||
|
Assert.assertEquals("Expected the last checkpoint value to change after checkpointing",
|
||||||
|
new ExtendedSequenceNumber(entry.getKey()),
|
||||||
|
processingCheckpointer.getLastCheckpointValue());
|
||||||
|
Assert.assertEquals("Expected the largest checkpoint value to remain the same since the last set",
|
||||||
|
new ExtendedSequenceNumber(entry.getKey()),
|
||||||
|
processingCheckpointer.getLargestPermittedCheckpointValue());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
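The checkpointer tests above assert a bounds rule on client-supplied checkpoints: a value may not move behind the last checkpoint, may not exceed the largest permitted checkpoint value, and null or non-numeric values are rejected. The compact sketch below restates that rule; the helper class is hypothetical and assumes ExtendedSequenceNumber supports compareTo, so it only illustrates what the tests verify, not how the library implements it.

import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;

final class CheckpointBoundsSketch {
    // Hypothetical illustration of the bounds rule the tests above assert.
    static void checkCheckpointBounds(ExtendedSequenceNumber candidate,
            ExtendedSequenceNumber lastCheckpointValue,
            ExtendedSequenceNumber largestPermittedCheckpointValue) {
        if (candidate == null) {
            throw new NullPointerException("A checkpoint value is required");
        }
        if (candidate.compareTo(lastCheckpointValue) < 0
                || candidate.compareTo(largestPermittedCheckpointValue) > 0) {
            throw new IllegalArgumentException("Checkpoint value " + candidate + " is outside the range ["
                    + lastCheckpointValue + ", " + largestPermittedCheckpointValue + "]");
        }
    }
}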
@@ -0,0 +1,139 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import junit.framework.Assert;

import org.junit.Test;
import org.mockito.Mockito;

import static org.junit.Assert.fail;

import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.model.InvalidArgumentException;
import com.amazonaws.services.kinesis.model.ShardIteratorType;

public class SequenceNumberValidatorTest {

    private final boolean validateWithGetIterator = true;
    private final String shardId = "shardid-123";

    @Test
    public final void testSequenceNumberValidator() {

        IKinesisProxy proxy = Mockito.mock(IKinesisProxy.class);

        SequenceNumberValidator validator = new SequenceNumberValidator(proxy, shardId, validateWithGetIterator);

        String goodSequence = "456";
        String iterator = "happyiterator";
        String badSequence = "789";
        Mockito.doReturn(iterator)
                .when(proxy)
                .getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), goodSequence);
        Mockito.doThrow(new InvalidArgumentException(""))
                .when(proxy)
                .getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), badSequence);

        validator.validateSequenceNumber(goodSequence);
        Mockito.verify(proxy, Mockito.times(1)).getIterator(shardId,
                ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                goodSequence);

        try {
            validator.validateSequenceNumber(badSequence);
            fail("Bad sequence number did not cause the validator to throw an exception");
        } catch (IllegalArgumentException e) {
            Mockito.verify(proxy, Mockito.times(1)).getIterator(shardId,
                    ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                    badSequence);
        }

        nonNumericValueValidationTest(validator, proxy, validateWithGetIterator);
    }

    @Test
    public final void testNoValidation() {
        IKinesisProxy proxy = Mockito.mock(IKinesisProxy.class);
        String shardId = "shardid-123";
        SequenceNumberValidator validator = new SequenceNumberValidator(proxy, shardId, !validateWithGetIterator);
        String goodSequence = "456";

        // Just checking that the false flag for validating against getIterator is honored
        validator.validateSequenceNumber(goodSequence);
        Mockito.verify(proxy, Mockito.times(0)).getIterator(shardId,
                ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                goodSequence);

        // Validator should still validate sentinel values
        nonNumericValueValidationTest(validator, proxy, !validateWithGetIterator);
    }

    private void nonNumericValueValidationTest(SequenceNumberValidator validator,
            IKinesisProxy proxy,
            boolean validateWithGetIterator) {

        String[] nonNumericStrings =
                { null, "bogus-sequence-number", SentinelCheckpoint.LATEST.toString(),
                        SentinelCheckpoint.SHARD_END.toString(), SentinelCheckpoint.TRIM_HORIZON.toString() };

        for (String nonNumericString : nonNumericStrings) {
            try {
                validator.validateSequenceNumber(nonNumericString);
                fail("Validator should not consider " + nonNumericString + " a valid sequence number");
            } catch (IllegalArgumentException e) {
                // Non-numeric strings should always be rejected by the validator before the proxy can be called,
                // so we check that the proxy was not called at all.
                Mockito.verify(proxy, Mockito.times(0)).getIterator(shardId,
                        ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(),
                        nonNumericString);
            }
        }
    }

    @Test
    public final void testIsDigits() {
        // Check things that are all digits
        String[] stringsOfDigits = {
                "0",
                "12",
                "07897803434",
                "12324456576788",
        };
        for (String digits : stringsOfDigits) {
            Assert.assertTrue("Expected that " + digits + " would be considered a string of digits.",
                    SequenceNumberValidator.isDigits(digits));
        }
        // Check things that are not all digits
        String[] stringsWithNonDigits = {
                null,
                "",
                " ", // white spaces
                "6 4",
                "\t45",
                "5242354235234\n",
                "7\n6\n5\n",
                "12s", // last character
                "c07897803434", // first character
                "1232445wef6576788", // interior
                "no-digits",
        };
        for (String notAllDigits : stringsWithNonDigits) {
            Assert.assertFalse("Expected that " + notAllDigits + " would not be considered a string of digits.",
                    SequenceNumberValidator.isDigits(notAllDigits));
        }
    }
}
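The validator tests above pin down two behaviours: a sequence number must be a plain string of digits, and, when getIterator validation is enabled, Kinesis is probed with an AFTER_SEQUENCE_NUMBER iterator request whose InvalidArgumentException surfaces as IllegalArgumentException. A rough sketch of that flow follows; the class and method names are hypothetical and mirror only what the mocks above exercise, not the library source.

import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.model.InvalidArgumentException;
import com.amazonaws.services.kinesis.model.ShardIteratorType;

final class SequenceNumberValidationSketch {
    static void validate(IKinesisProxy proxy, String shardId, String sequenceNumber,
            boolean validateWithGetIterator) {
        // Reject null and anything that is not a plain string of digits (sentinel values included).
        if (sequenceNumber == null || !sequenceNumber.matches("[0-9]+")) {
            throw new IllegalArgumentException(sequenceNumber + " is not a valid sequence number");
        }
        if (validateWithGetIterator) {
            try {
                // Probe Kinesis: an AFTER_SEQUENCE_NUMBER iterator request fails for bogus values.
                proxy.getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), sequenceNumber);
            } catch (InvalidArgumentException e) {
                throw new IllegalArgumentException("Kinesis rejected sequence number " + sequenceNumber, e);
            }
        }
    }
}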
@@ -0,0 +1,365 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
|
||||||
|
|
||||||
|
import static org.hamcrest.Matchers.equalTo;
|
||||||
|
import static org.hamcrest.Matchers.instanceOf;
|
||||||
|
import static org.hamcrest.Matchers.is;
|
||||||
|
import static org.hamcrest.Matchers.nullValue;
|
||||||
|
import static org.junit.Assert.assertThat;
|
||||||
|
import static org.junit.Assert.fail;
|
||||||
|
import static org.mockito.Matchers.any;
|
||||||
|
import static org.mockito.Matchers.anyString;
|
||||||
|
import static org.mockito.Mockito.doNothing;
|
||||||
|
import static org.mockito.Mockito.doThrow;
|
||||||
|
import static org.mockito.Mockito.mock;
|
||||||
|
import static org.mockito.Mockito.spy;
|
||||||
|
import static org.mockito.Mockito.times;
|
||||||
|
import static org.mockito.Mockito.verify;
|
||||||
|
import static org.mockito.Mockito.when;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.math.BigInteger;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.ListIterator;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.RejectedExecutionException;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.InMemoryCheckpointImpl;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShardConsumer.ShardConsumerState;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
|
||||||
|
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
|
||||||
|
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
|
||||||
|
import com.amazonaws.services.kinesis.model.Record;
|
||||||
|
import com.amazonaws.services.kinesis.model.ShardIteratorType;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Unit tests of {@link ShardConsumer}.
|
||||||
|
*/
|
||||||
|
public class ShardConsumerTest {
|
||||||
|
|
||||||
|
private static final Log LOG = LogFactory.getLog(ShardConsumerTest.class);
|
||||||
|
|
||||||
|
private final IMetricsFactory metricsFactory = new NullMetricsFactory();
|
||||||
|
private final boolean callProcessRecordsForEmptyRecordList = false;
|
||||||
|
private final long taskBackoffTimeMillis = 500L;
|
||||||
|
private final long parentShardPollIntervalMillis = 50L;
|
||||||
|
private final boolean cleanupLeasesOfCompletedShards = true;
|
||||||
|
// We don't want any of these tests to run checkpoint validation
|
||||||
|
private final boolean skipCheckpointValidationValue = false;
|
||||||
|
private final InitialPositionInStream initialPositionInStream = InitialPositionInStream.LATEST;
|
||||||
|
|
||||||
|
// Use Executors.newFixedThreadPool since it returns ThreadPoolExecutor, which is
|
||||||
|
// ... a non-final public class, and so can be mocked and spied.
|
||||||
|
private final ExecutorService executorService = Executors.newFixedThreadPool(1);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method to verify consumer stays in INITIALIZING state when InitializationTask fails.
|
||||||
|
*/
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Test
|
||||||
|
public final void testInitializationStateUponFailure() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null);
|
||||||
|
ICheckpoint checkpoint = mock(ICheckpoint.class);
|
||||||
|
|
||||||
|
when(checkpoint.getCheckpoint(anyString())).thenThrow(NullPointerException.class);
|
||||||
|
IRecordProcessor processor = mock(IRecordProcessor.class);
|
||||||
|
IKinesisProxy streamProxy = mock(IKinesisProxy.class);
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
when(leaseManager.getLease(anyString())).thenReturn(null);
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(streamProxy,
|
||||||
|
1,
|
||||||
|
10,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
|
||||||
|
ShardConsumer consumer =
|
||||||
|
new ShardConsumer(shardInfo,
|
||||||
|
streamConfig,
|
||||||
|
checkpoint,
|
||||||
|
processor,
|
||||||
|
null,
|
||||||
|
parentShardPollIntervalMillis,
|
||||||
|
cleanupLeasesOfCompletedShards,
|
||||||
|
executorService,
|
||||||
|
metricsFactory,
|
||||||
|
taskBackoffTimeMillis);
|
||||||
|
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method to verify consumer stays in INITIALIZING state when submission of the InitializationTask fails.
|
||||||
|
*/
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Test
|
||||||
|
public final void testInitializationStateUponSubmissionFailure() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null);
|
||||||
|
ICheckpoint checkpoint = mock(ICheckpoint.class);
|
||||||
|
ExecutorService spyExecutorService = spy(executorService);
|
||||||
|
|
||||||
|
when(checkpoint.getCheckpoint(anyString())).thenThrow(NullPointerException.class);
|
||||||
|
IRecordProcessor processor = mock(IRecordProcessor.class);
|
||||||
|
IKinesisProxy streamProxy = mock(IKinesisProxy.class);
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
when(leaseManager.getLease(anyString())).thenReturn(null);
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(streamProxy,
|
||||||
|
1,
|
||||||
|
10,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
|
||||||
|
ShardConsumer consumer =
|
||||||
|
new ShardConsumer(shardInfo,
|
||||||
|
streamConfig,
|
||||||
|
checkpoint,
|
||||||
|
processor,
|
||||||
|
null,
|
||||||
|
parentShardPollIntervalMillis,
|
||||||
|
cleanupLeasesOfCompletedShards,
|
||||||
|
spyExecutorService,
|
||||||
|
metricsFactory,
|
||||||
|
taskBackoffTimeMillis);
|
||||||
|
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
|
||||||
|
doThrow(new RejectedExecutionException()).when(spyExecutorService).submit(any(InitializeTask.class));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Test
|
||||||
|
public final void testRecordProcessorThrowable() throws Exception {
|
||||||
|
ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null);
|
||||||
|
ICheckpoint checkpoint = mock(ICheckpoint.class);
|
||||||
|
IRecordProcessor processor = mock(IRecordProcessor.class);
|
||||||
|
IKinesisProxy streamProxy = mock(IKinesisProxy.class);
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(streamProxy,
|
||||||
|
1,
|
||||||
|
10,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
|
||||||
|
ShardConsumer consumer =
|
||||||
|
new ShardConsumer(shardInfo,
|
||||||
|
streamConfig,
|
||||||
|
checkpoint,
|
||||||
|
processor,
|
||||||
|
null,
|
||||||
|
parentShardPollIntervalMillis,
|
||||||
|
cleanupLeasesOfCompletedShards,
|
||||||
|
executorService,
|
||||||
|
metricsFactory,
|
||||||
|
taskBackoffTimeMillis);
|
||||||
|
|
||||||
|
when(leaseManager.getLease(anyString())).thenReturn(null);
|
||||||
|
when(checkpoint.getCheckpoint(anyString())).thenReturn(new ExtendedSequenceNumber("123"));
|
||||||
|
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
consumer.consumeShard(); // submit BlockOnParentShardTask
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
verify(processor, times(0)).initialize(any(InitializationInput.class));
|
||||||
|
|
||||||
|
// Throw Error when IRecordProcessor.initialize() is invoked.
|
||||||
|
doThrow(new Error("ThrowableTest")).when(processor).initialize(any(InitializationInput.class));
|
||||||
|
|
||||||
|
consumer.consumeShard(); // submit InitializeTask
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
verify(processor, times(1)).initialize(any(InitializationInput.class));
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Checking the status of submitted InitializeTask from above should throw exception.
|
||||||
|
consumer.consumeShard();
|
||||||
|
fail("ShardConsumer should have thrown exception.");
|
||||||
|
} catch (RuntimeException e) {
|
||||||
|
assertThat(e.getCause(), instanceOf(ExecutionException.class));
|
||||||
|
}
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
verify(processor, times(1)).initialize(any(InitializationInput.class));
|
||||||
|
|
||||||
|
doNothing().when(processor).initialize(any(InitializationInput.class));
|
||||||
|
|
||||||
|
consumer.consumeShard(); // submit InitializeTask again.
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
verify(processor, times(2)).initialize(any(InitializationInput.class));
|
||||||
|
|
||||||
|
// Checking the status of submitted InitializeTask from above should pass.
|
||||||
|
consumer.consumeShard();
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.PROCESSING)));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShardConsumer#consumeShard()}
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testConsumeShard() throws Exception {
|
||||||
|
int numRecs = 10;
|
||||||
|
BigInteger startSeqNum = BigInteger.ONE;
|
||||||
|
String streamShardId = "kinesis-0-0";
|
||||||
|
String testConcurrencyToken = "testToken";
|
||||||
|
File file =
|
||||||
|
KinesisLocalFileDataCreator.generateTempDataFile(1,
|
||||||
|
"kinesis-0-",
|
||||||
|
numRecs,
|
||||||
|
startSeqNum,
|
||||||
|
"unitTestSCT001");
|
||||||
|
|
||||||
|
IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());
|
||||||
|
|
||||||
|
final int maxRecords = 2;
|
||||||
|
final int idleTimeMS = 0; // keep unit tests fast
|
||||||
|
ICheckpoint checkpoint = new InMemoryCheckpointImpl(startSeqNum.toString());
|
||||||
|
checkpoint.setCheckpoint(streamShardId, ExtendedSequenceNumber.TRIM_HORIZON, testConcurrencyToken);
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
when(leaseManager.getLease(anyString())).thenReturn(null);
|
||||||
|
|
||||||
|
TestStreamlet processor = new TestStreamlet();
|
||||||
|
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(fileBasedProxy,
|
||||||
|
maxRecords,
|
||||||
|
idleTimeMS,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
|
||||||
|
ShardInfo shardInfo = new ShardInfo(streamShardId, testConcurrencyToken, null);
|
||||||
|
ShardConsumer consumer =
|
||||||
|
new ShardConsumer(shardInfo,
|
||||||
|
streamConfig,
|
||||||
|
checkpoint,
|
||||||
|
processor,
|
||||||
|
leaseManager,
|
||||||
|
parentShardPollIntervalMillis,
|
||||||
|
cleanupLeasesOfCompletedShards,
|
||||||
|
executorService,
|
||||||
|
metricsFactory,
|
||||||
|
taskBackoffTimeMillis);
|
||||||
|
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)));
|
||||||
|
consumer.consumeShard(); // check on parent shards
|
||||||
|
Thread.sleep(50L);
|
||||||
|
consumer.consumeShard(); // start initialization
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.INITIALIZING)));
|
||||||
|
consumer.consumeShard(); // initialize
|
||||||
|
Thread.sleep(50L);
|
||||||
|
|
||||||
|
// We expect to process all records in numRecs calls
|
||||||
|
for (int i = 0; i < numRecs;) {
|
||||||
|
boolean newTaskSubmitted = consumer.consumeShard();
|
||||||
|
if (newTaskSubmitted) {
|
||||||
|
LOG.debug("New processing task was submitted, call # " + i);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.PROCESSING)));
|
||||||
|
// CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES
|
||||||
|
i += maxRecords;
|
||||||
|
}
|
||||||
|
Thread.sleep(50L);
|
||||||
|
}
|
||||||
|
|
||||||
|
assertThat(processor.getShutdownReason(), nullValue());
|
||||||
|
consumer.beginShutdown();
|
||||||
|
Thread.sleep(50L);
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.SHUTTING_DOWN)));
|
||||||
|
consumer.beginShutdown();
|
||||||
|
assertThat(consumer.getCurrentState(), is(equalTo(ShardConsumerState.SHUTDOWN_COMPLETE)));
|
||||||
|
assertThat(processor.getShutdownReason(), is(equalTo(ShutdownReason.ZOMBIE)));
|
||||||
|
|
||||||
|
executorService.shutdown();
|
||||||
|
executorService.awaitTermination(60, TimeUnit.SECONDS);
|
||||||
|
|
||||||
|
String iterator = fileBasedProxy.getIterator(streamShardId, ShardIteratorType.TRIM_HORIZON.toString(), null);
|
||||||
|
List<Record> expectedRecords = toUserRecords(fileBasedProxy.get(iterator, numRecs).getRecords());
|
||||||
|
verifyConsumedRecords(expectedRecords, processor.getProcessedRecords());
|
||||||
|
file.delete();
|
||||||
|
}
|
||||||
|
|
||||||
|
//@formatter:off (gets the formatting wrong)
|
||||||
|
private void verifyConsumedRecords(List<Record> expectedRecords,
|
||||||
|
List<Record> actualRecords) {
|
||||||
|
//@formatter:on
|
||||||
|
assertThat(actualRecords.size(), is(equalTo(expectedRecords.size())));
|
||||||
|
ListIterator<Record> expectedIter = expectedRecords.listIterator();
|
||||||
|
ListIterator<Record> actualIter = actualRecords.listIterator();
|
||||||
|
for (int i = 0; i < expectedRecords.size(); ++i) {
|
||||||
|
assertThat(actualIter.next(), is(equalTo(expectedIter.next())));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private List<Record> toUserRecords(List<Record> records) {
|
||||||
|
if (records == null || records.isEmpty()) {
|
||||||
|
return records;
|
||||||
|
}
|
||||||
|
List<Record> userRecords = new ArrayList<Record>();
|
||||||
|
for (Record record : records) {
|
||||||
|
userRecords.add(new UserRecord(record));
|
||||||
|
}
|
||||||
|
return userRecords;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
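testConsumeShard above drives the consumer state machine by repeatedly calling consumeShard() and sleeping so the submitted task can complete before the state is checked. The hypothetical test helper below condenses that pattern, reusing only methods the tests themselves call (consumeShard, getCurrentState); it assumes same-package access to ShardConsumer, as these tests have, and the timings are arbitrary test values.

import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShardConsumer.ShardConsumerState;

final class ShardConsumerPumpSketch {
    static void pumpUntil(ShardConsumer consumer, ShardConsumerState target,
            long pollMillis, int maxPolls) throws Exception {
        for (int i = 0; i < maxPolls; i++) {
            consumer.consumeShard();   // submit whatever task the current state calls for
            Thread.sleep(pollMillis);  // give the executor time to finish the task
            if (consumer.getCurrentState() == target) {
                return;
            }
        }
        throw new IllegalStateException("Consumer did not reach state " + target);
    }
}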
@@ -0,0 +1,100 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class ShardInfoTest {

    private static final String CONCURRENCY_TOKEN = UUID.randomUUID().toString();
    private static final String SHARD_ID = "shardId-test";
    private final Set<String> parentShardIds = new HashSet<>();
    private ShardInfo testShardInfo;

    @Before
    public void setUpShardInfo() {
        // Add parent shard Ids
        parentShardIds.add("shard-1");
        parentShardIds.add("shard-2");

        testShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds);
    }

    @Test
    public void testShardInfoEqualsWithSameArgs() {
        ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds);
        Assert.assertTrue("equals should return true when all arguments are the same",
                testShardInfo.equals(equalShardInfo));
    }

    @Test
    public void testShardInfoEqualsWithNull() {
        Assert.assertFalse("equals should return false when the other object is null",
                testShardInfo.equals(null));
    }

    @Test
    public void testShardInfoEqualsForShardId() {
        ShardInfo diffShardInfo = new ShardInfo("shardId-diff", CONCURRENCY_TOKEN, parentShardIds);
        Assert.assertFalse("equals should return false for a different shard id",
                diffShardInfo.equals(testShardInfo));
        diffShardInfo = new ShardInfo(null, CONCURRENCY_TOKEN, parentShardIds);
        Assert.assertFalse("equals should return false for a null shard id",
                diffShardInfo.equals(testShardInfo));
    }

    @Test
    public void testShardInfoEqualsForConcurrencyToken() {
        ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds);
        Assert.assertFalse("equals should return false for a different concurrency token",
                diffShardInfo.equals(testShardInfo));
        diffShardInfo = new ShardInfo(SHARD_ID, null, parentShardIds);
        Assert.assertFalse("equals should return false for a null concurrency token",
                diffShardInfo.equals(testShardInfo));
    }

    @Test
    public void testShardInfoEqualsForDifferentlyOrderedParentIds() {
        List<String> differentlyOrderedParentShardIds = new ArrayList<>();
        differentlyOrderedParentShardIds.add("shard-2");
        differentlyOrderedParentShardIds.add("shard-1");
        ShardInfo shardInfoWithDifferentlyOrderedParentShardIds =
                new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds);
        Assert.assertTrue("equals should return true even with the parent shard Ids reordered",
                shardInfoWithDifferentlyOrderedParentShardIds.equals(testShardInfo));
    }

    @Test
    public void testShardInfoEqualsForParentIds() {
        Set<String> diffParentIds = new HashSet<>();
        diffParentIds.add("shard-3");
        diffParentIds.add("shard-4");
        ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds);
        Assert.assertFalse("equals should return false for different parent shard Ids",
                diffShardInfo.equals(testShardInfo));
        diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, null);
        Assert.assertFalse("equals should return false for null parent shard Ids",
                diffShardInfo.equals(testShardInfo));
    }

    @Test
    public void testShardInfoSameHashCode() {
        ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds);
        Assert.assertTrue("ShardInfo objects built from the same arguments should have the same hashCode",
                equalShardInfo.hashCode() == testShardInfo.hashCode());
    }
}
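One case the tests above leave implicit is the hashCode side of the reordered-parent-ids scenario. A minimal sketch of how it could be covered, assuming the same ShardInfo constructor used above (an editorial suggestion, not part of the commit):

    @Test
    public void testShardInfoSameHashCodeForDifferentlyOrderedParentIds() {
        List<String> reordered = new ArrayList<>();
        reordered.add("shard-2");
        reordered.add("shard-1");
        ShardInfo reorderedShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, reordered);
        // If equals() ignores parent shard id order (asserted above), the hashCode
        // contract requires the hash codes to match as well.
        Assert.assertEquals("hashCode should not depend on parent shard id order",
                testShardInfo.hashCode(), reorderedShardInfo.hashCode());
    }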
@@ -0,0 +1,132 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.kinesis.model.HashKeyRange;
import com.amazonaws.services.kinesis.model.SequenceNumberRange;
import com.amazonaws.services.kinesis.model.Shard;

/**
 * Helper class to create Shard, SequenceNumberRange and related objects.
 */
class ShardObjectHelper {

    private static final int EXPONENT = 128;

    /**
     * Max value of a sequence number (2^128 - 1). Useful for defining the sequence number range of a shard.
     */
    static final String MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString();

    /**
     * Min value of a sequence number (0). Useful for defining the sequence number range of a shard.
     */
    static final String MIN_SEQUENCE_NUMBER = BigInteger.ZERO.toString();

    /**
     * Max value of a hash key (2^128 - 1). Useful for defining the hash key range of a shard.
     */
    static final String MAX_HASH_KEY = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString();

    /**
     * Min value of a hash key (0). Useful for defining the hash key range of a shard.
     */
    public static final String MIN_HASH_KEY = BigInteger.ZERO.toString();

    /** Static helper methods only; no instances. */
    private ShardObjectHelper() {
    }

    /** Helper method to create a new shard object.
     * @param shardId
     * @param parentShardId
     * @param adjacentParentShardId
     * @param sequenceNumberRange
     * @return a new shard with the given attributes and no hash key range
     */
    static Shard newShard(String shardId,
            String parentShardId,
            String adjacentParentShardId,
            SequenceNumberRange sequenceNumberRange) {
        return newShard(shardId, parentShardId, adjacentParentShardId, sequenceNumberRange, null);
    }

    /** Helper method to create a new shard object.
     * @param shardId
     * @param parentShardId
     * @param adjacentParentShardId
     * @param sequenceNumberRange
     * @param hashKeyRange
     * @return a new shard with the given attributes
     */
    static Shard newShard(String shardId,
            String parentShardId,
            String adjacentParentShardId,
            SequenceNumberRange sequenceNumberRange,
            HashKeyRange hashKeyRange) {
        Shard shard = new Shard();
        shard.setShardId(shardId);
        shard.setParentShardId(parentShardId);
        shard.setAdjacentParentShardId(adjacentParentShardId);
        shard.setSequenceNumberRange(sequenceNumberRange);
        shard.setHashKeyRange(hashKeyRange);

        return shard;
    }

    /** Helper method to create a sequence number range.
     * @param startingSequenceNumber
     * @param endingSequenceNumber
     * @return a new range from startingSequenceNumber to endingSequenceNumber
     */
    static SequenceNumberRange newSequenceNumberRange(String startingSequenceNumber, String endingSequenceNumber) {
        SequenceNumberRange range = new SequenceNumberRange();
        range.setStartingSequenceNumber(startingSequenceNumber);
        range.setEndingSequenceNumber(endingSequenceNumber);
        return range;
    }

    /** Helper method to create a hash key range.
     * @param startingHashKey
     * @param endingHashKey
     * @return a new range from startingHashKey to endingHashKey
     */
    static HashKeyRange newHashKeyRange(String startingHashKey, String endingHashKey) {
        HashKeyRange range = new HashKeyRange();
        range.setStartingHashKey(startingHashKey);
        range.setEndingHashKey(endingHashKey);
        return range;
    }

    static List<String> getParentShardIds(Shard shard) {
        List<String> parentShardIds = new ArrayList<>(2);
        if (shard.getAdjacentParentShardId() != null) {
            parentShardIds.add(shard.getAdjacentParentShardId());
        }
        if (shard.getParentShardId() != null) {
            parentShardIds.add(shard.getParentShardId());
        }
        return parentShardIds;
    }

}
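As a quick orientation for readers of the tests that follow, here is a usage sketch (not part of the commit; the shard id value is illustrative) of the helper above, building a single parentless shard that owns the entire hash key space and has an open-ended sequence number range:

    SequenceNumberRange sequenceRange =
            ShardObjectHelper.newSequenceNumberRange(ShardObjectHelper.MIN_SEQUENCE_NUMBER, null);
    HashKeyRange hashRange =
            ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, ShardObjectHelper.MAX_HASH_KEY);
    // A parentless shard: parentShardId and adjacentParentShardId are both null.
    Shard shard = ShardObjectHelper.newShard("shardId-000000000000", null, null, sequenceRange, hashRange);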
@@ -0,0 +1,81 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListSet;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;

import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
import com.amazonaws.services.kinesis.model.Shard;

/**
 * Helper class to verify shard lineage in unit tests that use TestStreamlet.
 * Verifies that parent shard processors were shut down before the child shard processor was initialized.
 */
class ShardSequenceVerifier {

    private static final Log LOG = LogFactory.getLog(ShardSequenceVerifier.class);
    private Map<String, Shard> shardIdToShards = new HashMap<String, Shard>();
    private ConcurrentSkipListSet<String> initializedShards = new ConcurrentSkipListSet<>();
    private ConcurrentSkipListSet<String> shutdownShards = new ConcurrentSkipListSet<>();
    private List<String> validationFailures = Collections.synchronizedList(new ArrayList<String>());

    /**
     * Constructor with the shard list for the stream.
     */
    ShardSequenceVerifier(List<Shard> shardList) {
        for (Shard shard : shardList) {
            shardIdToShards.put(shard.getShardId(), shard);
        }
    }

    void registerInitialization(String shardId) {
        List<String> parentShardIds = ShardObjectHelper.getParentShardIds(shardIdToShards.get(shardId));
        for (String parentShardId : parentShardIds) {
            if (initializedShards.contains(parentShardId)) {
                if (!shutdownShards.contains(parentShardId)) {
                    String message = "Parent shard " + parentShardId + " was not shut down before shard "
                            + shardId + " was initialized.";
                    LOG.error(message);
                    validationFailures.add(message);
                }
            }
        }
        initializedShards.add(shardId);
    }

    void registerShutdown(String shardId, ShutdownReason reason) {
        if (reason.equals(ShutdownReason.TERMINATE)) {
            shutdownShards.add(shardId);
        }
    }

    void verify() {
        for (String message : validationFailures) {
            LOG.error(message);
        }
        Assert.assertTrue(validationFailures.isEmpty());
    }

}
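The intended call pattern, assembled from the methods above (a sketch under the assumption that record processors report their own lifecycle events; the helper name and parameters are illustrative, not part of the commit):

    // In a test in the same package, with shardList describing a parent/child split:
    static void exampleLineageCheck(List<Shard> shardList, String parentShardId, String childShardId) {
        ShardSequenceVerifier verifier = new ShardSequenceVerifier(shardList);
        verifier.registerInitialization(parentShardId);                     // parent processor starts
        verifier.registerShutdown(parentShardId, ShutdownReason.TERMINATE); // parent reaches SHARD_END
        verifier.registerInitialization(childShardId);                      // child may start only now
        verifier.verify();                                                  // no lineage violations recorded
    }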
@@ -0,0 +1,138 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClient;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
import com.amazonaws.services.kinesis.leases.interfaces.IKinesisClientLeaseManager;
import com.amazonaws.services.kinesis.model.StreamStatus;

/**
 * WARN: to run this integration test you'll have to provide an AwsCredentials.properties file on the classpath.
 */
public class ShardSyncTaskIntegrationTest {

    private static final String STREAM_NAME = "IntegrationTestStream02";
    private static final String KINESIS_ENDPOINT = "https://kinesis.us-east-1.amazonaws.com";

    private static AWSCredentialsProvider credentialsProvider;
    private IKinesisClientLeaseManager leaseManager;
    private IKinesisProxy kinesisProxy;

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        credentialsProvider = new DefaultAWSCredentialsProviderChain();
        AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider);

        try {
            kinesis.createStream(STREAM_NAME, 1);
        } catch (AmazonServiceException ase) {
            // The stream may already exist from a previous run; continue and wait for it to become active.
        }
        StreamStatus status;
        do {
            Thread.sleep(1000); // poll once per second instead of hammering DescribeStream
            status = StreamStatus.fromValue(kinesis.describeStream(STREAM_NAME).getStreamDescription().getStreamStatus());
        } while (status != StreamStatus.ACTIVE);

    }

    /**
     * @throws java.lang.Exception
     */
    @AfterClass
    public static void tearDownAfterClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
        boolean useConsistentReads = true;
        leaseManager =
                new KinesisClientLeaseManager("ShardSyncTaskIntegrationTest",
                        new AmazonDynamoDBClient(credentialsProvider),
                        useConsistentReads);
        kinesisProxy =
                new KinesisProxy(STREAM_NAME,
                        new DefaultAWSCredentialsProviderChain(),
                        KINESIS_ENDPOINT);
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
    }

    /**
     * Test method for call().
     *
     * @throws DependencyException
     * @throws InvalidStateException
     * @throws ProvisionedThroughputException
     */
    @Test
    public final void testCall() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        if (!leaseManager.leaseTableExists()) {
            final Long readCapacity = 10L;
            final Long writeCapacity = 10L;
            leaseManager.createLeaseTableIfNotExists(readCapacity, writeCapacity);
        }
        leaseManager.deleteAll();
        Set<String> shardIds = kinesisProxy.getAllShardIds();
        ShardSyncTask syncTask =
                new ShardSyncTask(kinesisProxy, leaseManager, InitialPositionInStream.LATEST, false, 0L);
        syncTask.call();
        List<KinesisClientLease> leases = leaseManager.listLeases();
        Set<String> leaseKeys = new HashSet<String>();
        for (KinesisClientLease lease : leases) {
            leaseKeys.add(lease.getLeaseKey());
        }

        // Verify that all shardIds had leases for them
        Assert.assertEquals(shardIds.size(), leases.size());
        shardIds.removeAll(leaseKeys);
        Assert.assertTrue(shardIds.isEmpty());
    }

}
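Regarding the warning in the class javadoc above: the AwsCredentials.properties file follows the AWS SDK for Java's classpath credentials convention. A placeholder sketch is shown below (the key values are AWS's documented dummy examples, not real credentials); since the test also builds a DefaultAWSCredentialsProviderChain, environment variables or ~/.aws/credentials should work as alternatives.

    accessKey = AKIAIOSFODNN7EXAMPLE
    secretKey = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY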
File diff suppressed because it is too large
@@ -0,0 +1,141 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.HashSet;
import java.util.Set;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;

/**
 * Unit tests of ShutdownTask.
 */
public class ShutdownTaskTest {
    private static final long TASK_BACKOFF_TIME_MILLIS = 1L;
    Set<String> defaultParentShardIds = new HashSet<>();
    String defaultConcurrencyToken = "testToken4398";
    String defaultShardId = "shardId-0000397840";
    ShardInfo defaultShardInfo = new ShardInfo(defaultShardId,
            defaultConcurrencyToken,
            defaultParentShardIds);
    IRecordProcessor defaultRecordProcessor = new TestStreamlet();

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @AfterClass
    public static void tearDownAfterClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
    }

    /**
     * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownTask#call()}.
     */
    @Test
    public final void testCallWhenApplicationDoesNotCheckpoint() {
        RecordProcessorCheckpointer checkpointer = mock(RecordProcessorCheckpointer.class);
        when(checkpointer.getLastCheckpointValue()).thenReturn(new ExtendedSequenceNumber("3298"));
        IKinesisProxy kinesisProxy = mock(IKinesisProxy.class);
        ILeaseManager<KinesisClientLease> leaseManager = mock(KinesisClientLeaseManager.class);
        boolean cleanupLeasesOfCompletedShards = false;
        ShutdownTask task =
                new ShutdownTask(defaultShardInfo,
                        defaultRecordProcessor,
                        checkpointer,
                        ShutdownReason.TERMINATE,
                        kinesisProxy,
                        InitialPositionInStream.TRIM_HORIZON,
                        cleanupLeasesOfCompletedShards,
                        leaseManager,
                        TASK_BACKOFF_TIME_MILLIS);
        TaskResult result = task.call();
        Assert.assertNotNull(result.getException());
        Assert.assertTrue(result.getException() instanceof IllegalArgumentException);
    }

    /**
     * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownTask#call()}.
     */
    @Test
    public final void testCallWhenSyncingShardsThrows() {
        RecordProcessorCheckpointer checkpointer = mock(RecordProcessorCheckpointer.class);
        when(checkpointer.getLastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END);
        IKinesisProxy kinesisProxy = mock(IKinesisProxy.class);
        when(kinesisProxy.getShardList()).thenReturn(null);
        ILeaseManager<KinesisClientLease> leaseManager = mock(KinesisClientLeaseManager.class);
        boolean cleanupLeasesOfCompletedShards = false;
        ShutdownTask task =
                new ShutdownTask(defaultShardInfo,
                        defaultRecordProcessor,
                        checkpointer,
                        ShutdownReason.TERMINATE,
                        kinesisProxy,
                        InitialPositionInStream.TRIM_HORIZON,
                        cleanupLeasesOfCompletedShards,
                        leaseManager,
                        TASK_BACKOFF_TIME_MILLIS);
        TaskResult result = task.call();
        Assert.assertNotNull(result.getException());
        Assert.assertTrue(result.getException() instanceof KinesisClientLibIOException);
    }

    /**
     * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownTask#getTaskType()}.
     */
    @Test
    public final void testGetTaskType() {
        ShutdownTask task = new ShutdownTask(null, null, null, null, null, null, false, null, 0);
        Assert.assertEquals(TaskType.SHUTDOWN, task.getTaskType());
    }

}
@@ -0,0 +1,151 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Semaphore;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
import com.amazonaws.services.kinesis.model.Record;

/**
 * Streamlet that tracks the records it has seen - useful for testing.
 */
class TestStreamlet implements IRecordProcessor {

    private static final Log LOG = LogFactory.getLog(TestStreamlet.class);

    private List<Record> records = new ArrayList<Record>();

    private Set<String> processedSeqNums = new HashSet<String>(); // used for deduping

    private Semaphore sem; // used to allow test cases to wait for all records to be processed

    private String shardId;

    // Record the last shutdown reason we were called with.
    private ShutdownReason shutdownReason;
    private ShardSequenceVerifier shardSequenceVerifier;
    private long numProcessRecordsCallsWithEmptyRecordList;

    public TestStreamlet() {

    }

    public TestStreamlet(Semaphore sem, ShardSequenceVerifier shardSequenceVerifier) {
        this();
        this.sem = sem;
        this.shardSequenceVerifier = shardSequenceVerifier;
    }

    public List<Record> getProcessedRecords() {
        return records;
    }

    @Override
    public void initialize(InitializationInput input) {
        shardId = input.getShardId();
        if (shardSequenceVerifier != null) {
            shardSequenceVerifier.registerInitialization(shardId);
        }
    }

    @Override
    public void processRecords(ProcessRecordsInput input) {
        List<Record> dataRecords = input.getRecords();
        IRecordProcessorCheckpointer checkpointer = input.getCheckpointer();
        if ((dataRecords != null) && (!dataRecords.isEmpty())) {
            for (Record record : dataRecords) {
                LOG.debug("Processing record: " + record);
                String seqNum = record.getSequenceNumber();
                if (!processedSeqNums.contains(seqNum)) {
                    records.add(record);
                    processedSeqNums.add(seqNum);
                }
            }
        }
        // Guard against a null record list so an empty delivery doesn't throw.
        if ((dataRecords == null) || dataRecords.isEmpty()) {
            numProcessRecordsCallsWithEmptyRecordList++;
        }
        try {
            checkpointer.checkpoint();
        } catch (ThrottlingException | ShutdownException
                | KinesisClientLibDependencyException | InvalidStateException e) {
            // Continue processing records and checkpoint next time if we get a transient error.
            // Don't checkpoint if the processor has been shutdown.
            LOG.debug("Caught exception while checkpointing: ", e);
        }

        if ((sem != null) && (dataRecords != null)) {
            sem.release(dataRecords.size());
        }
    }

    @Override
    public void shutdown(ShutdownInput input) {
        ShutdownReason reason = input.getShutdownReason();
        IRecordProcessorCheckpointer checkpointer = input.getCheckpointer();
        if (shardSequenceVerifier != null) {
            shardSequenceVerifier.registerShutdown(shardId, reason);
        }
        shutdownReason = reason;
        if (reason.equals(ShutdownReason.TERMINATE)) {
            try {
                checkpointer.checkpoint();
            } catch (KinesisClientLibNonRetryableException e) {
                LOG.error("Caught exception when checkpointing while shutdown.", e);
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * @return the shardId
     */
    String getShardId() {
        return shardId;
    }

    /**
     * @return the shutdownReason
     */
    ShutdownReason getShutdownReason() {
        return shutdownReason;
    }

    /**
     * @return the numProcessRecordsCallsWithEmptyRecordList
     */
    long getNumProcessRecordsCallsWithEmptyRecordList() {
        return numProcessRecordsCallsWithEmptyRecordList;
    }

}
@@ -0,0 +1,64 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Semaphore;

import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;

/**
 * Factory for TestStreamlet record processors.
 */
class TestStreamletFactory implements IRecordProcessorFactory {

    // Will be passed to the TestStreamlet. Can be used to check if all records have been processed.
    private Semaphore semaphore;
    private ShardSequenceVerifier shardSequenceVerifier;
    List<TestStreamlet> testStreamlets = new ArrayList<>();

    /**
     * Constructor.
     */
    TestStreamletFactory(Semaphore semaphore, ShardSequenceVerifier shardSequenceVerifier) {
        this.semaphore = semaphore;
        this.shardSequenceVerifier = shardSequenceVerifier;
    }

    @Override
    public synchronized IRecordProcessor createProcessor() {
        TestStreamlet processor = new TestStreamlet(semaphore, shardSequenceVerifier);
        testStreamlets.add(processor);
        return processor;
    }

    Semaphore getSemaphore() {
        return semaphore;
    }

    ShardSequenceVerifier getShardSequenceVerifier() {
        return shardSequenceVerifier;
    }

    /**
     * @return the testStreamlets
     */
    List<TestStreamlet> getTestStreamlets() {
        return testStreamlets;
    }

}
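To tie the last few test helpers together, a minimal sketch (not from this commit; the helper name and record count are illustrative) of how a worker test is expected to compose them: the semaphore lets the test block until the expected number of records has been processed by TestStreamlet instances created through this factory, and the verifier then checks shard lineage.

    // A real test would also construct and start a Worker with this factory; that part is elided here.
    static void awaitRecordsAndVerifyLineage(List<Shard> shardList, int expectedRecordCount)
            throws InterruptedException {
        Semaphore recordCounter = new Semaphore(0);
        ShardSequenceVerifier verifier = new ShardSequenceVerifier(shardList);
        TestStreamletFactory factory = new TestStreamletFactory(recordCounter, verifier);
        // ... build a Worker with `factory`, publish expectedRecordCount records, and run it ...
        recordCounter.acquire(expectedRecordCount); // TestStreamlet releases one permit per record processed
        verifier.verify();                          // parents must have shut down before children initialized
    }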
@ -0,0 +1,924 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
|
||||||
|
|
||||||
|
import static org.mockito.Matchers.any;
|
||||||
|
import static org.mockito.Mockito.atLeast;
|
||||||
|
import static org.mockito.Mockito.doAnswer;
|
||||||
|
import static org.mockito.Mockito.mock;
|
||||||
|
import static org.mockito.Mockito.times;
|
||||||
|
import static org.mockito.Mockito.verify;
|
||||||
|
import static org.mockito.Mockito.when;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.lang.Thread.State;
|
||||||
|
import java.math.BigInteger;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.ListIterator;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.CountDownLatch;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.Semaphore;
|
||||||
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
|
|
||||||
|
import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.BeforeClass;
|
||||||
|
import org.junit.Ignore;
|
||||||
|
import org.junit.Rule;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.junit.rules.Timeout;
|
||||||
|
import org.mockito.invocation.InvocationOnMock;
|
||||||
|
import org.mockito.stubbing.Answer;
|
||||||
|
|
||||||
|
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerCWMetricsFactory;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerThreadPoolExecutor;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
|
||||||
|
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
|
||||||
|
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager;
|
||||||
|
import com.amazonaws.services.kinesis.leases.impl.LeaseManager;
|
||||||
|
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
|
||||||
|
import com.amazonaws.services.kinesis.model.HashKeyRange;
|
||||||
|
import com.amazonaws.services.kinesis.model.Record;
|
||||||
|
import com.amazonaws.services.kinesis.model.SequenceNumberRange;
|
||||||
|
import com.amazonaws.services.kinesis.model.Shard;
|
||||||
|
import com.amazonaws.services.kinesis.model.ShardIteratorType;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Unit tests of Worker.
|
||||||
|
*/
|
||||||
|
public class WorkerTest {
|
||||||
|
|
||||||
|
private static final Log LOG = LogFactory.getLog(WorkerTest.class);
|
||||||
|
|
||||||
|
@Rule
|
||||||
|
public Timeout timeout = new Timeout((int)TimeUnit.SECONDS.toMillis(30));
|
||||||
|
|
||||||
|
private final NullMetricsFactory nullMetricsFactory = new NullMetricsFactory();
|
||||||
|
private final long taskBackoffTimeMillis = 1L;
|
||||||
|
private final long failoverTimeMillis = 5L;
|
||||||
|
private final boolean callProcessRecordsForEmptyRecordList = false;
|
||||||
|
private final long parentShardPollIntervalMillis = 5L;
|
||||||
|
private final long shardSyncIntervalMillis = 5L;
|
||||||
|
private final boolean cleanupLeasesUponShardCompletion = true;
|
||||||
|
// We don't want any of these tests to run checkpoint validation
|
||||||
|
private final boolean skipCheckpointValidationValue = false;
|
||||||
|
private final InitialPositionInStream initialPositionInStream = InitialPositionInStream.LATEST;
|
||||||
|
|
||||||
|
// CHECKSTYLE:IGNORE AnonInnerLengthCheck FOR NEXT 50 LINES
|
||||||
|
private static final com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY =
|
||||||
|
new com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory() {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor createProcessor() {
|
||||||
|
return new com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor() {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
|
||||||
|
if (reason == ShutdownReason.TERMINATE) {
|
||||||
|
try {
|
||||||
|
checkpointer.checkpoint();
|
||||||
|
} catch (KinesisClientLibNonRetryableException e) {
|
||||||
|
throw new RuntimeException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void processRecords(List<Record> dataRecords, IRecordProcessorCheckpointer checkpointer) {
|
||||||
|
try {
|
||||||
|
checkpointer.checkpoint();
|
||||||
|
} catch (KinesisClientLibNonRetryableException e) {
|
||||||
|
throw new RuntimeException(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void initialize(String shardId) {
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 =
|
||||||
|
new V1ToV2RecordProcessorFactoryAdapter(SAMPLE_RECORD_PROCESSOR_FACTORY);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#getApplicationName()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testGetApplicationName() {
|
||||||
|
final String stageName = "testStageName";
|
||||||
|
final KinesisClientLibConfiguration clientConfig =
|
||||||
|
new KinesisClientLibConfiguration(stageName, null, null, null);
|
||||||
|
Worker worker =
|
||||||
|
new Worker(mock(com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory.class),
|
||||||
|
clientConfig);
|
||||||
|
Assert.assertEquals(stageName, worker.getApplicationName());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testCreateOrGetShardConsumer() {
|
||||||
|
final String stageName = "testStageName";
|
||||||
|
IRecordProcessorFactory streamletFactory = SAMPLE_RECORD_PROCESSOR_FACTORY_V2;
|
||||||
|
IKinesisProxy proxy = null;
|
||||||
|
ICheckpoint checkpoint = null;
|
||||||
|
int maxRecords = 1;
|
||||||
|
int idleTimeInMilliseconds = 1000;
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(proxy,
|
||||||
|
maxRecords,
|
||||||
|
idleTimeInMilliseconds,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
final String testConcurrencyToken = "testToken";
|
||||||
|
final String anotherConcurrencyToken = "anotherTestToken";
|
||||||
|
final String dummyKinesisShardId = "kinesis-0-0";
|
||||||
|
ExecutorService execService = null;
|
||||||
|
|
||||||
|
KinesisClientLibLeaseCoordinator leaseCoordinator = mock(KinesisClientLibLeaseCoordinator.class);
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager);
|
||||||
|
|
||||||
|
Worker worker =
|
||||||
|
new Worker(stageName,
|
||||||
|
streamletFactory,
|
||||||
|
streamConfig,
|
||||||
|
InitialPositionInStream.LATEST,
|
||||||
|
parentShardPollIntervalMillis,
|
||||||
|
shardSyncIntervalMillis,
|
||||||
|
cleanupLeasesUponShardCompletion,
|
||||||
|
checkpoint,
|
||||||
|
leaseCoordinator,
|
||||||
|
execService,
|
||||||
|
nullMetricsFactory,
|
||||||
|
taskBackoffTimeMillis,
|
||||||
|
failoverTimeMillis);
|
||||||
|
ShardInfo shardInfo = new ShardInfo(dummyKinesisShardId, testConcurrencyToken, null);
|
||||||
|
ShardConsumer consumer = worker.createOrGetShardConsumer(shardInfo, streamletFactory);
|
||||||
|
Assert.assertNotNull(consumer);
|
||||||
|
ShardConsumer consumer2 = worker.createOrGetShardConsumer(shardInfo, streamletFactory);
|
||||||
|
Assert.assertSame(consumer, consumer2);
|
||||||
|
ShardInfo shardInfoWithSameShardIdButDifferentConcurrencyToken =
|
||||||
|
new ShardInfo(dummyKinesisShardId, anotherConcurrencyToken, null);
|
||||||
|
ShardConsumer consumer3 =
|
||||||
|
worker.createOrGetShardConsumer(shardInfoWithSameShardIdButDifferentConcurrencyToken, streamletFactory);
|
||||||
|
Assert.assertNotNull(consumer3);
|
||||||
|
Assert.assertNotSame(consumer3, consumer);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testCleanupShardConsumers() {
|
||||||
|
final String stageName = "testStageName";
|
||||||
|
IRecordProcessorFactory streamletFactory = SAMPLE_RECORD_PROCESSOR_FACTORY_V2;
|
||||||
|
IKinesisProxy proxy = null;
|
||||||
|
ICheckpoint checkpoint = null;
|
||||||
|
int maxRecords = 1;
|
||||||
|
int idleTimeInMilliseconds = 1000;
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(proxy,
|
||||||
|
maxRecords,
|
||||||
|
idleTimeInMilliseconds,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
final String concurrencyToken = "testToken";
|
||||||
|
final String anotherConcurrencyToken = "anotherTestToken";
|
||||||
|
final String dummyKinesisShardId = "kinesis-0-0";
|
||||||
|
final String anotherDummyKinesisShardId = "kinesis-0-1";
|
||||||
|
ExecutorService execService = null;
|
||||||
|
|
||||||
|
KinesisClientLibLeaseCoordinator leaseCoordinator = mock(KinesisClientLibLeaseCoordinator.class);
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager);
|
||||||
|
|
||||||
|
Worker worker =
|
||||||
|
new Worker(stageName,
|
||||||
|
streamletFactory,
|
||||||
|
streamConfig,
|
||||||
|
InitialPositionInStream.LATEST,
|
||||||
|
parentShardPollIntervalMillis,
|
||||||
|
shardSyncIntervalMillis,
|
||||||
|
cleanupLeasesUponShardCompletion,
|
||||||
|
checkpoint,
|
||||||
|
leaseCoordinator,
|
||||||
|
execService,
|
||||||
|
nullMetricsFactory,
|
||||||
|
taskBackoffTimeMillis,
|
||||||
|
failoverTimeMillis);
|
||||||
|
|
||||||
|
ShardInfo shardInfo1 = new ShardInfo(dummyKinesisShardId, concurrencyToken, null);
|
||||||
|
ShardInfo duplicateOfShardInfo1ButWithAnotherConcurrencyToken =
|
||||||
|
new ShardInfo(dummyKinesisShardId, anotherConcurrencyToken, null);
|
||||||
|
ShardInfo shardInfo2 = new ShardInfo(anotherDummyKinesisShardId, concurrencyToken, null);
|
||||||
|
|
||||||
|
ShardConsumer consumerOfShardInfo1 = worker.createOrGetShardConsumer(shardInfo1, streamletFactory);
|
||||||
|
ShardConsumer consumerOfDuplicateOfShardInfo1ButWithAnotherConcurrencyToken =
|
||||||
|
worker.createOrGetShardConsumer(duplicateOfShardInfo1ButWithAnotherConcurrencyToken, streamletFactory);
|
||||||
|
ShardConsumer consumerOfShardInfo2 = worker.createOrGetShardConsumer(shardInfo2, streamletFactory);
|
||||||
|
|
||||||
|
Set<ShardInfo> assignedShards = new HashSet<ShardInfo>();
|
||||||
|
assignedShards.add(shardInfo1);
|
||||||
|
assignedShards.add(shardInfo2);
|
||||||
|
worker.cleanupShardConsumers(assignedShards);
|
||||||
|
|
||||||
|
// verify shard consumer not present in assignedShards is shut down
|
||||||
|
Assert.assertTrue(consumerOfDuplicateOfShardInfo1ButWithAnotherConcurrencyToken.isBeginShutdown());
|
||||||
|
// verify shard consumers present in assignedShards aren't shut down
|
||||||
|
Assert.assertFalse(consumerOfShardInfo1.isBeginShutdown());
|
||||||
|
Assert.assertFalse(consumerOfShardInfo2.isBeginShutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testInitializationFailureWithRetries() {
|
||||||
|
String stageName = "testInitializationWorker";
|
||||||
|
IRecordProcessorFactory recordProcessorFactory = new TestStreamletFactory(null, null);
|
||||||
|
IKinesisProxy proxy = mock(IKinesisProxy.class);
|
||||||
|
int count = 0;
|
||||||
|
when(proxy.getShardList()).thenThrow(new RuntimeException(Integer.toString(count++)));
|
||||||
|
int maxRecords = 2;
|
||||||
|
long idleTimeInMilliseconds = 1L;
|
||||||
|
StreamConfig streamConfig =
|
||||||
|
new StreamConfig(proxy,
|
||||||
|
maxRecords,
|
||||||
|
idleTimeInMilliseconds,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
skipCheckpointValidationValue,
|
||||||
|
initialPositionInStream);
|
||||||
|
KinesisClientLibLeaseCoordinator leaseCoordinator = mock(KinesisClientLibLeaseCoordinator.class);
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
ILeaseManager<KinesisClientLease> leaseManager = mock(ILeaseManager.class);
|
||||||
|
when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager);
|
||||||
|
ExecutorService execService = Executors.newSingleThreadExecutor();
|
||||||
|
long shardPollInterval = 0L;
|
||||||
|
Worker worker =
|
||||||
|
new Worker(stageName,
|
||||||
|
recordProcessorFactory,
|
||||||
|
streamConfig,
|
||||||
|
InitialPositionInStream.TRIM_HORIZON,
|
||||||
|
shardPollInterval,
|
||||||
|
shardSyncIntervalMillis,
|
||||||
|
cleanupLeasesUponShardCompletion,
|
||||||
|
leaseCoordinator,
|
||||||
|
leaseCoordinator,
|
||||||
|
execService,
|
||||||
|
nullMetricsFactory,
|
||||||
|
taskBackoffTimeMillis,
|
||||||
|
failoverTimeMillis);
|
||||||
|
worker.run();
|
||||||
|
Assert.assertTrue(count > 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs worker with threadPoolSize == numShards
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testRunWithThreadPoolSizeEqualToNumShards() throws Exception {
|
||||||
|
final int numShards = 1;
|
||||||
|
final int threadPoolSize = numShards;
|
||||||
|
runAndTestWorker(numShards, threadPoolSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs worker with threadPoolSize < numShards
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testRunWithThreadPoolSizeLessThanNumShards() throws Exception {
|
||||||
|
final int numShards = 3;
|
||||||
|
final int threadPoolSize = 2;
|
||||||
|
runAndTestWorker(numShards, threadPoolSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs worker with threadPoolSize > numShards
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testRunWithThreadPoolSizeMoreThanNumShards() throws Exception {
|
||||||
|
final int numShards = 3;
|
||||||
|
final int threadPoolSize = 5;
|
||||||
|
runAndTestWorker(numShards, threadPoolSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs worker with threadPoolSize < numShards
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testOneSplitShard2Threads() throws Exception {
|
||||||
|
final int threadPoolSize = 2;
|
||||||
|
final int numberOfRecordsPerShard = 10;
|
||||||
|
List<Shard> shardList = createShardListWithOneSplit();
|
||||||
|
List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
|
||||||
|
KinesisClientLease lease = ShardSyncer.newKCLLease(shardList.get(0));
|
||||||
|
lease.setCheckpoint(new ExtendedSequenceNumber("2"));
|
||||||
|
initialLeases.add(lease);
|
||||||
|
runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Runs worker with threadPoolSize < numShards
|
||||||
|
* Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public final void testOneSplitShard2ThreadsWithCallsForEmptyRecords() throws Exception {
|
||||||
|
final int threadPoolSize = 2;
|
||||||
|
final int numberOfRecordsPerShard = 10;
|
||||||
|
List<Shard> shardList = createShardListWithOneSplit();
|
||||||
|
List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
|
||||||
|
KinesisClientLease lease = ShardSyncer.newKCLLease(shardList.get(0));
|
||||||
|
lease.setCheckpoint(new ExtendedSequenceNumber("2"));
|
||||||
|
initialLeases.add(lease);
|
||||||
|
boolean callProcessRecordsForEmptyRecordList = true;
|
||||||
|
runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testWorkerShutsDownOwnedResources() throws Exception {
|
||||||
|
final WorkerThreadPoolExecutor executorService = mock(WorkerThreadPoolExecutor.class);
|
||||||
|
final WorkerCWMetricsFactory cwMetricsFactory = mock(WorkerCWMetricsFactory.class);
|
||||||
|
final long failoverTimeMillis = 20L;
|
||||||
|
|
||||||
|
// Make sure that worker thread is run before invoking shutdown.
|
||||||
|
final CountDownLatch workerStarted = new CountDownLatch(1);
|
||||||
|
doAnswer(new Answer<Boolean>() {
|
||||||
|
@Override
|
||||||
|
public Boolean answer(InvocationOnMock invocation) throws Throwable {
|
||||||
|
workerStarted.countDown();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}).when(executorService).isShutdown();
|
||||||
|
|
||||||
|
final WorkerThread workerThread = runWorker(Collections.<Shard>emptyList(),
|
||||||
|
Collections.<KinesisClientLease>emptyList(),
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
failoverTimeMillis,
|
||||||
|
10,
|
||||||
|
mock(IKinesisProxy.class),
|
||||||
|
mock(IRecordProcessorFactory.class),
|
||||||
|
executorService,
|
||||||
|
cwMetricsFactory);
|
||||||
|
|
||||||
|
// Give some time for thread to run.
|
||||||
|
workerStarted.await();
|
||||||
|
|
||||||
|
workerThread.getWorker().shutdown();
|
||||||
|
workerThread.join();
|
||||||
|
|
||||||
|
Assert.assertTrue(workerThread.getState() == State.TERMINATED);
|
||||||
|
verify(executorService, times(1)).shutdownNow();
|
||||||
|
verify(cwMetricsFactory, times(1)).shutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testWorkerDoesNotShutdownClientResources() throws Exception {
|
||||||
|
final ExecutorService executorService = mock(ThreadPoolExecutor.class);
|
||||||
|
final CWMetricsFactory cwMetricsFactory = mock(CWMetricsFactory.class);
|
||||||
|
final long failoverTimeMillis = 20L;
|
||||||
|
|
||||||
|
// Make sure that worker thread is run before invoking shutdown.
|
||||||
|
final CountDownLatch workerStarted = new CountDownLatch(1);
|
||||||
|
doAnswer(new Answer<Boolean>() {
|
||||||
|
@Override
|
||||||
|
public Boolean answer(InvocationOnMock invocation) throws Throwable {
|
||||||
|
workerStarted.countDown();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}).when(executorService).isShutdown();
|
||||||
|
|
||||||
|
final WorkerThread workerThread = runWorker(Collections.<Shard>emptyList(),
|
||||||
|
Collections.<KinesisClientLease>emptyList(),
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
failoverTimeMillis,
|
||||||
|
10,
|
||||||
|
mock(IKinesisProxy.class),
|
||||||
|
mock(IRecordProcessorFactory.class),
|
||||||
|
executorService,
|
||||||
|
cwMetricsFactory);
|
||||||
|
|
||||||
|
// Give some time for thread to run.
|
||||||
|
workerStarted.await();
|
||||||
|
|
||||||
|
workerThread.getWorker().shutdown();
|
||||||
|
workerThread.join();
|
||||||
|
|
||||||
|
Assert.assertTrue(workerThread.getState() == State.TERMINATED);
|
||||||
|
verify(executorService, times(0)).shutdownNow();
|
||||||
|
verify(cwMetricsFactory, times(0)).shutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public final void testWorkerNormalShutdown() throws Exception {
|
||||||
|
final List<Shard> shardList = createShardListWithOneShard();
|
||||||
|
final boolean callProcessRecordsForEmptyRecordList = true;
|
||||||
|
final long failoverTimeMillis = 50L;
|
||||||
|
final int numberOfRecordsPerShard = 1000;
|
||||||
|
|
||||||
|
final List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
|
||||||
|
for (Shard shard : shardList) {
|
||||||
|
KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
|
||||||
|
lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
|
||||||
|
initialLeases.add(lease);
|
||||||
|
}
|
||||||
|
|
||||||
|
final File file = KinesisLocalFileDataCreator.generateTempDataFile(
|
||||||
|
shardList, numberOfRecordsPerShard, "normalShutdownUnitTest");
|
||||||
|
final IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());
|
||||||
|
|
||||||
|
final ExecutorService executorService = Executors.newCachedThreadPool();
|
||||||
|
|
||||||
|
// Make test case as efficient as possible.
|
||||||
|
final CountDownLatch processRecordsLatch = new CountDownLatch(1);
|
||||||
|
IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class);
|
||||||
|
IRecordProcessor recordProcessor = mock(IRecordProcessor.class);
|
||||||
|
when(recordProcessorFactory.createProcessor()).thenReturn(recordProcessor);
|
||||||
|
|
||||||
|
doAnswer(new Answer<Object> () {
|
||||||
|
@Override
|
||||||
|
public Object answer(InvocationOnMock invocation) throws Throwable {
|
||||||
|
// Signal that record processor has started processing records.
|
||||||
|
processRecordsLatch.countDown();
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}).when(recordProcessor).processRecords(any(ProcessRecordsInput.class));
|
||||||
|
|
||||||
|
WorkerThread workerThread = runWorker(shardList,
|
||||||
|
initialLeases,
|
||||||
|
callProcessRecordsForEmptyRecordList,
|
||||||
|
failoverTimeMillis,
|
||||||
|
numberOfRecordsPerShard,
|
||||||
|
fileBasedProxy,
|
||||||
|
recordProcessorFactory,
|
||||||
|
executorService,
|
||||||
|
nullMetricsFactory);
|
||||||
|
|
||||||
|
// Only sleep for time that is required.
|
||||||
|
processRecordsLatch.await();
|
||||||
|
|
||||||
|
// Make sure record processor is initialized and processing records.
|
||||||
|
verify(recordProcessorFactory, times(1)).createProcessor();
|
||||||
|
verify(recordProcessor, times(1)).initialize(any(InitializationInput.class));
|
||||||
|
verify(recordProcessor, atLeast(1)).processRecords(any(ProcessRecordsInput.class));
|
||||||
|
verify(recordProcessor, times(0)).shutdown(any(ShutdownInput.class));
|
||||||
|
|
||||||
|
workerThread.getWorker().shutdown();
|
||||||
|
workerThread.join();
|
||||||
|
|
||||||
|
Assert.assertTrue(workerThread.getState() == State.TERMINATED);
|
||||||
|
verify(recordProcessor, times(1)).shutdown(any(ShutdownInput.class));
|
||||||
|
}
|
||||||
|
|
||||||
|
    @Test
    public final void testWorkerForcefulShutdown() throws Exception {
        final List<Shard> shardList = createShardListWithOneShard();
        final boolean callProcessRecordsForEmptyRecordList = true;
        final long failoverTimeMillis = 50L;
        final int numberOfRecordsPerShard = 10;

        final List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
        for (Shard shard : shardList) {
            KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
            lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
            initialLeases.add(lease);
        }

        final File file = KinesisLocalFileDataCreator.generateTempDataFile(
                shardList, numberOfRecordsPerShard, "normalShutdownUnitTest");
        final IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

        // Get executor service that will be owned by the worker, so we can get interrupts.
        ExecutorService executorService = getWorkerThreadPoolExecutor();

        // Make test case as efficient as possible.
        final CountDownLatch processRecordsLatch = new CountDownLatch(1);
        final AtomicBoolean recordProcessorInterrupted = new AtomicBoolean(false);
        IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class);
        IRecordProcessor recordProcessor = mock(IRecordProcessor.class);
        when(recordProcessorFactory.createProcessor()).thenReturn(recordProcessor);

        doAnswer(new Answer<Object>() {
            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                // Signal that record processor has started processing records.
                processRecordsLatch.countDown();

                // Block for some time now to test forceful shutdown. Also, check if record processor
                // was interrupted or not.
                final long totalSleepTimeMillis = failoverTimeMillis * 10;
                final long startTimeMillis = System.currentTimeMillis();
                long elapsedTimeMillis = 0;
                while (elapsedTimeMillis < totalSleepTimeMillis) {
                    try {
                        Thread.sleep(totalSleepTimeMillis);
                    } catch (InterruptedException e) {
                        recordProcessorInterrupted.getAndSet(true);
                    }
                    elapsedTimeMillis = System.currentTimeMillis() - startTimeMillis;
                }
                return null;
            }
        }).when(recordProcessor).processRecords(any(ProcessRecordsInput.class));

        WorkerThread workerThread = runWorker(shardList,
                initialLeases,
                callProcessRecordsForEmptyRecordList,
                failoverTimeMillis,
                numberOfRecordsPerShard,
                fileBasedProxy,
                recordProcessorFactory,
                executorService,
                nullMetricsFactory);

        // Only sleep for time that is required.
        processRecordsLatch.await();

        // Make sure record processor is initialized and processing records.
        verify(recordProcessorFactory, times(1)).createProcessor();
        verify(recordProcessor, times(1)).initialize(any(InitializationInput.class));
        verify(recordProcessor, atLeast(1)).processRecords(any(ProcessRecordsInput.class));
        verify(recordProcessor, times(0)).shutdown(any(ShutdownInput.class));

        workerThread.getWorker().shutdown();
        workerThread.join();

        Assert.assertTrue(workerThread.getState() == State.TERMINATED);
        // Shutdown should not be called in this case because record processor is blocked.
        verify(recordProcessor, times(0)).shutdown(any(ShutdownInput.class));
        Assert.assertTrue(recordProcessorInterrupted.get());
    }
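
    // For illustration (not part of the original test): the forceful-shutdown case above relies on the
    // worker interrupting the thread that is still blocked inside processRecords(), via the executor
    // service the worker owns. In plain JDK terms the pattern being exercised is roughly:
    //
    //     ExecutorService pool = Executors.newSingleThreadExecutor();
    //     pool.submit(new Runnable() {
    //         public void run() {
    //             try { Thread.sleep(60000L); } catch (InterruptedException e) { /* interrupt observed */ }
    //         }
    //     });
    //     pool.shutdownNow(); // delivers the interrupt; a plain shutdown() would let the sleep finish
    //
    // The assertions reflect exactly that: shutdown(ShutdownInput) is never delivered to the blocked
    // processor, but the interrupt is observed and recorded.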

    /**
     * Returns executor service that will be owned by the worker. This is useful to test the scenario
     * where worker shuts down the executor service also during shutdown flow.
     * @return Executor service that will be owned by the worker.
     */
    private WorkerThreadPoolExecutor getWorkerThreadPoolExecutor() {
        return new WorkerThreadPoolExecutor();
    }

    private List<Shard> createShardListWithOneShard() {
        List<Shard> shards = new ArrayList<Shard>();
        SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("39428", "987324");
        HashKeyRange keyRange =
                ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, ShardObjectHelper.MAX_HASH_KEY);
        Shard shard0 = ShardObjectHelper.newShard("shardId-0", null, null, range0, keyRange);
        shards.add(shard0);

        return shards;
    }

    /**
     * @return Two shards: a closed parent ("shardId-0") and its child ("shardId-1"), simulating a
     *         single shard split.
     */
    private List<Shard> createShardListWithOneSplit() {
        List<Shard> shards = new ArrayList<Shard>();
        SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("39428", "987324");
        SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("987325", null);
        HashKeyRange keyRange =
                ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, ShardObjectHelper.MAX_HASH_KEY);
        Shard shard0 = ShardObjectHelper.newShard("shardId-0", null, null, range0, keyRange);
        shards.add(shard0);

        Shard shard1 = ShardObjectHelper.newShard("shardId-1", "shardId-0", null, range1, keyRange);
        shards.add(shard1);

        return shards;
    }

    private void runAndTestWorker(int numShards, int threadPoolSize) throws Exception {
        final int numberOfRecordsPerShard = 10;
        final String kinesisShardPrefix = "kinesis-0-";
        final BigInteger startSeqNum = BigInteger.ONE;
        List<Shard> shardList = KinesisLocalFileDataCreator.createShardList(numShards, kinesisShardPrefix, startSeqNum);
        Assert.assertEquals(numShards, shardList.size());
        List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
        for (Shard shard : shardList) {
            KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
            lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
            initialLeases.add(lease);
        }
        runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard);
    }

    private void runAndTestWorker(List<Shard> shardList,
            int threadPoolSize,
            List<KinesisClientLease> initialLeases,
            boolean callProcessRecordsForEmptyRecordList,
            int numberOfRecordsPerShard) throws Exception {
        File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard, "unitTestWT001");
        IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

        Semaphore recordCounter = new Semaphore(0);
        ShardSequenceVerifier shardSequenceVerifier = new ShardSequenceVerifier(shardList);
        TestStreamletFactory recordProcessorFactory = new TestStreamletFactory(recordCounter, shardSequenceVerifier);

        ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);

        WorkerThread workerThread = runWorker(
                shardList, initialLeases, callProcessRecordsForEmptyRecordList, failoverTimeMillis,
                numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory, executorService, nullMetricsFactory);

        // TestStreamlet will release the semaphore once for every record it processes
        recordCounter.acquire(numberOfRecordsPerShard * shardList.size());

        // Wait a bit to allow the worker to spin against the end of the stream.
        Thread.sleep(500L);

        testWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList,
                numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory);

        workerThread.getWorker().shutdown();
        executorService.shutdownNow();
        file.delete();
    }

    private WorkerThread runWorker(List<Shard> shardList,
            List<KinesisClientLease> initialLeases,
            boolean callProcessRecordsForEmptyRecordList,
            long failoverTimeMillis,
            int numberOfRecordsPerShard,
            IKinesisProxy kinesisProxy,
            IRecordProcessorFactory recordProcessorFactory,
            ExecutorService executorService,
            IMetricsFactory metricsFactory) throws Exception {
        final String stageName = "testStageName";
        final int maxRecords = 2;

        final long leaseDurationMillis = 10000L;
        final long epsilonMillis = 1000L;
        final long idleTimeInMilliseconds = 2L;

        AmazonDynamoDB ddbClient = DynamoDBEmbedded.create();
        LeaseManager<KinesisClientLease> leaseManager = new KinesisClientLeaseManager("foo", ddbClient);
        leaseManager.createLeaseTableIfNotExists(1L, 1L);
        for (KinesisClientLease initialLease : initialLeases) {
            leaseManager.createLeaseIfNotExists(initialLease);
        }

        KinesisClientLibLeaseCoordinator leaseCoordinator =
                new KinesisClientLibLeaseCoordinator(leaseManager,
                        stageName,
                        leaseDurationMillis,
                        epsilonMillis,
                        metricsFactory);

        StreamConfig streamConfig =
                new StreamConfig(kinesisProxy,
                        maxRecords,
                        idleTimeInMilliseconds,
                        callProcessRecordsForEmptyRecordList,
                        skipCheckpointValidationValue,
                        initialPositionInStream);

        Worker worker =
                new Worker(stageName,
                        recordProcessorFactory,
                        streamConfig,
                        InitialPositionInStream.TRIM_HORIZON,
                        parentShardPollIntervalMillis,
                        shardSyncIntervalMillis,
                        cleanupLeasesUponShardCompletion,
                        leaseCoordinator,
                        leaseCoordinator,
                        executorService,
                        metricsFactory,
                        taskBackoffTimeMillis,
                        failoverTimeMillis);

        WorkerThread workerThread = new WorkerThread(worker);
        workerThread.start();
        return workerThread;
    }

    private void testWorker(List<Shard> shardList,
            int threadPoolSize,
            List<KinesisClientLease> initialLeases,
            boolean callProcessRecordsForEmptyRecordList,
            int numberOfRecordsPerShard,
            IKinesisProxy kinesisProxy,
            TestStreamletFactory recordProcessorFactory) throws Exception {
        recordProcessorFactory.getShardSequenceVerifier().verify();

        // Gather values to compare across all processors of a given shard.
        Map<String, List<Record>> shardStreamletsRecords = new HashMap<String, List<Record>>();
        Map<String, ShutdownReason> shardsLastProcessorShutdownReason = new HashMap<String, ShutdownReason>();
        Map<String, Long> shardsNumProcessRecordsCallsWithEmptyRecordList = new HashMap<String, Long>();
        for (TestStreamlet processor : recordProcessorFactory.getTestStreamlets()) {
            String shardId = processor.getShardId();
            if (shardStreamletsRecords.get(shardId) == null) {
                shardStreamletsRecords.put(shardId, processor.getProcessedRecords());
            } else {
                List<Record> records = shardStreamletsRecords.get(shardId);
                records.addAll(processor.getProcessedRecords());
                shardStreamletsRecords.put(shardId, records);
            }
            if (shardsNumProcessRecordsCallsWithEmptyRecordList.get(shardId) == null) {
                shardsNumProcessRecordsCallsWithEmptyRecordList.put(shardId,
                        processor.getNumProcessRecordsCallsWithEmptyRecordList());
            } else {
                long totalShardsNumProcessRecordsCallsWithEmptyRecordList =
                        shardsNumProcessRecordsCallsWithEmptyRecordList.get(shardId)
                                + processor.getNumProcessRecordsCallsWithEmptyRecordList();
                shardsNumProcessRecordsCallsWithEmptyRecordList.put(shardId,
                        totalShardsNumProcessRecordsCallsWithEmptyRecordList);
            }
            shardsLastProcessorShutdownReason.put(processor.getShardId(), processor.getShutdownReason());
        }

        // verify that all records were processed at least once
        verifyAllRecordsOfEachShardWereConsumedAtLeastOnce(shardList, kinesisProxy, numberOfRecordsPerShard, shardStreamletsRecords);

        // within a record processor all the incoming records should be ordered
        verifyRecordsProcessedByEachProcessorWereOrdered(recordProcessorFactory);

        // for shards for which only one record processor was created, we verify that each record should be
        // processed exactly once
        verifyAllRecordsOfEachShardWithOnlyOneProcessorWereConsumedExactlyOnce(shardList,
                kinesisProxy,
                numberOfRecordsPerShard,
                shardStreamletsRecords,
                recordProcessorFactory);

        // if callProcessRecordsForEmptyRecordList flag is set then processors must have been invoked with empty record
        // sets else they shouldn't have been invoked with empty record sets
        verifyNumProcessRecordsCallsWithEmptyRecordList(shardList,
                shardsNumProcessRecordsCallsWithEmptyRecordList,
                callProcessRecordsForEmptyRecordList);

        // verify that worker shutdown last processor of shards that were terminated
        verifyLastProcessorOfClosedShardsWasShutdownWithTerminate(shardList, shardsLastProcessorShutdownReason);
    }

    // within a record processor all the incoming records should be ordered
    private void verifyRecordsProcessedByEachProcessorWereOrdered(TestStreamletFactory recordProcessorFactory) {
        for (TestStreamlet processor : recordProcessorFactory.getTestStreamlets()) {
            List<Record> processedRecords = processor.getProcessedRecords();
            for (int i = 0; i < processedRecords.size() - 1; i++) {
                BigInteger sequenceNumberOfcurrentRecord = new BigInteger(processedRecords.get(i).getSequenceNumber());
                BigInteger sequenceNumberOfNextRecord = new BigInteger(processedRecords.get(i + 1).getSequenceNumber());
                Assert.assertTrue(sequenceNumberOfcurrentRecord.subtract(sequenceNumberOfNextRecord).signum() == -1);
            }
        }
    }

    // for shards for which only one record processor was created, we verify that each record should be
    // processed exactly once
    private void verifyAllRecordsOfEachShardWithOnlyOneProcessorWereConsumedExactlyOnce(List<Shard> shardList,
            IKinesisProxy fileBasedProxy,
            int numRecs,
            Map<String, List<Record>> shardStreamletsRecords,
            TestStreamletFactory recordProcessorFactory) {
        Map<String, TestStreamlet> shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor =
                findShardIdsAndStreamLetsOfShardsWithOnlyOneProcessor(recordProcessorFactory);
        for (Shard shard : shardList) {
            String shardId = shard.getShardId();
            String iterator = fileBasedProxy.getIterator(shardId, ShardIteratorType.TRIM_HORIZON.toString(), null);
            List<Record> expectedRecords = fileBasedProxy.get(iterator, numRecs).getRecords();
            if (shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.containsKey(shardId)) {
                verifyAllRecordsWereConsumedExactlyOnce(expectedRecords,
                        shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.get(shardId).getProcessedRecords());
            }
        }
    }

    // verify that all records were processed at least once
    private void verifyAllRecordsOfEachShardWereConsumedAtLeastOnce(List<Shard> shardList,
            IKinesisProxy fileBasedProxy,
            int numRecs,
            Map<String, List<Record>> shardStreamletsRecords) {
        for (Shard shard : shardList) {
            String shardId = shard.getShardId();
            String iterator = fileBasedProxy.getIterator(shardId, ShardIteratorType.TRIM_HORIZON.toString(), null);
            List<Record> expectedRecords = fileBasedProxy.get(iterator, numRecs).getRecords();
            verifyAllRecordsWereConsumedAtLeastOnce(expectedRecords, shardStreamletsRecords.get(shardId));
        }
    }

    // verify that worker shutdown last processor of shards that were terminated
    private void verifyLastProcessorOfClosedShardsWasShutdownWithTerminate(List<Shard> shardList,
            Map<String, ShutdownReason> shardsLastProcessorShutdownReason) {
        for (Shard shard : shardList) {
            String shardId = shard.getShardId();
            String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber();
            if (endingSequenceNumber != null) {
                LOG.info("Closed shard " + shardId + " has an endingSequenceNumber " + endingSequenceNumber);
                Assert.assertEquals(ShutdownReason.TERMINATE, shardsLastProcessorShutdownReason.get(shardId));
            }
        }
    }

    // if callProcessRecordsForEmptyRecordList flag is set then processors must have been invoked with empty record
    // sets else they shouldn't have been invoked with empty record sets
    private void verifyNumProcessRecordsCallsWithEmptyRecordList(List<Shard> shardList,
            Map<String, Long> shardsNumProcessRecordsCallsWithEmptyRecordList,
            boolean callProcessRecordsForEmptyRecordList) {
        for (Shard shard : shardList) {
            String shardId = shard.getShardId();
            String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber();
            // check only for open shards
            if (endingSequenceNumber == null) {
                if (callProcessRecordsForEmptyRecordList) {
                    Assert.assertTrue(shardsNumProcessRecordsCallsWithEmptyRecordList.get(shardId) > 0);
                } else {
                    Assert.assertEquals(0, (long) shardsNumProcessRecordsCallsWithEmptyRecordList.get(shardId));
                }
            }
        }
    }

    private Map<String, TestStreamlet>
            findShardIdsAndStreamLetsOfShardsWithOnlyOneProcessor(TestStreamletFactory recordProcessorFactory) {
        Map<String, TestStreamlet> shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor =
                new HashMap<String, TestStreamlet>();
        Set<String> seenShardIds = new HashSet<String>();
        for (TestStreamlet processor : recordProcessorFactory.getTestStreamlets()) {
            String shardId = processor.getShardId();
            if (seenShardIds.add(shardId)) {
                shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.put(shardId, processor);
            } else {
                shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.remove(shardId);
            }
        }
        return shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor;
    }

    //@formatter:off (gets the formatting wrong)
    private void verifyAllRecordsWereConsumedExactlyOnce(List<Record> expectedRecords,
            List<Record> actualRecords) {
    //@formatter:on
        Assert.assertEquals(expectedRecords.size(), actualRecords.size());
        ListIterator<Record> expectedIter = expectedRecords.listIterator();
        ListIterator<Record> actualIter = actualRecords.listIterator();
        for (int i = 0; i < expectedRecords.size(); ++i) {
            Assert.assertEquals(expectedIter.next(), actualIter.next());
        }
    }

    //@formatter:off (gets the formatting wrong)
    private void verifyAllRecordsWereConsumedAtLeastOnce(List<Record> expectedRecords,
            List<Record> actualRecords) {
    //@formatter:on
        ListIterator<Record> expectedIter = expectedRecords.listIterator();
        for (int i = 0; i < expectedRecords.size(); ++i) {
            Record expectedRecord = expectedIter.next();
            Assert.assertTrue(actualRecords.contains(expectedRecord));
        }
    }

    private static class WorkerThread extends Thread {
        private final Worker worker;

        private WorkerThread(Worker worker) {
            super(worker);
            this.worker = worker;
        }

        public Worker getWorker() {
            return worker;
        }
    }

}

@@ -0,0 +1,390 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.proxies;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.services.kinesis.model.DescribeStreamResult;
import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.InvalidArgumentException;
import com.amazonaws.services.kinesis.model.PutRecordResult;
import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
import com.amazonaws.services.kinesis.model.SequenceNumberRange;
import com.amazonaws.services.kinesis.model.Shard;
import com.amazonaws.services.kinesis.model.ShardIteratorType;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * This is a (temporary) test utility class, to mimic Kinesis without having to integrate with Alpha.
 * In future, we should consider moving this to the Kinesis client/sampleApp package (if useful to
 * other Kinesis clients).
 */
public class KinesisLocalFileProxy implements IKinesisProxy {

    /**
     * Fields in the local file and their position in a line.
     */
    public enum LocalFileFields {
        /** Shard identifier. */
        SHARD_ID(0),
        /** Sequence number (assumed unique across shards). */
        SEQUENCE_NUMBER(1),
        /** Partition key associated with data record. */
        PARTITION_KEY(2),
        /** Data. */
        DATA(3);

        private final int position;

        LocalFileFields(int position) {
            this.position = position;
        }

        /**
         * @return Position of the field in the line.
         */
        public int getPosition() {
            return position;
        }
    };

    private static final Log LOG = LogFactory.getLog(KinesisLocalFileProxy.class);

    private static final String ITERATOR_DELIMITER = ":";

    private static final int NUM_FIELDS_IN_FILE = LocalFileFields.values().length;

    private final Map<String, List<Record>> shardedDataRecords = new HashMap<String, List<Record>>();

    private List<Shard> shardList;

    // Ids of shards that are closed - used to return a null iterator in getRecords after the last record
    private Set<String> closedShards = new HashSet<String>();

    private static final int EXPONENT = 128;

    /**
     * Max value of the hashed partition key (2^128 - 1). Useful for constructing shards for a stream.
     */
    public static final BigInteger MAX_HASHKEY_VALUE = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);

    /**
     * Max value of a sequence number (2^128 - 1). Useful for defining sequence number range for a shard.
     */
    public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);

    /**
     * @param fileName File with data records (one per line).
     *        File format (shardId, sequenceNumber, partitionKey, dataRecord).
     * @throws IOException IOException
     */
    public KinesisLocalFileProxy(String fileName) throws IOException {
        super();
        populateDataRecordsFromFile(fileName);
    }

    private void populateDataRecordsFromFile(String file) throws IOException {
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
            Charset charset = Charset.forName("UTF-8");
            CharsetEncoder encoder = charset.newEncoder();
            String str;
            str = in.readLine();
            if (str != null) {
                ObjectMapper objectMapper = new ObjectMapper();
                SerializedShardList shards = objectMapper.readValue(str, SerializedShardList.class);
                shardList = shards.getShardList();
            }
            if (shardList == null) {
                shardList = new ArrayList<Shard>();
            }

            // Populate shardIds of shards that have an ending sequence number (and which != maxSeqNum).
            // GetRecords will return a null iterator for these after all data has been returned.
            for (Shard shard : shardList) {
                SequenceNumberRange range = shard.getSequenceNumberRange();
                if ((range != null) && (range.getEndingSequenceNumber() != null)) {
                    BigInteger endingSequenceNumber = new BigInteger(range.getEndingSequenceNumber());
                    if (endingSequenceNumber.compareTo(MAX_SEQUENCE_NUMBER) != 0) {
                        closedShards.add(shard.getShardId());
                    }
                }
                shardedDataRecords.put(shard.getShardId(), new ArrayList<Record>());
            }

            while ((str = in.readLine()) != null) {
                String[] strArr = str.split(",");
                if (strArr.length != NUM_FIELDS_IN_FILE) {
                    throw new InvalidArgumentException("Unexpected input in file."
                            + "Expected format (shardId, sequenceNumber, partitionKey, dataRecord)");
                }
                String shardId = strArr[LocalFileFields.SHARD_ID.getPosition()];
                Record record = new Record();
                record.setSequenceNumber(strArr[LocalFileFields.SEQUENCE_NUMBER.getPosition()]);
                record.setPartitionKey(strArr[LocalFileFields.PARTITION_KEY.getPosition()]);
                ByteBuffer byteBuffer = encoder.encode(CharBuffer.wrap(strArr[LocalFileFields.DATA.getPosition()]));
                record.setData(byteBuffer);
                List<Record> shardRecords = shardedDataRecords.get(shardId);
                if (shardRecords == null) {
                    shardRecords = new ArrayList<Record>();
                }
                shardRecords.add(record);
                shardedDataRecords.put(shardId, shardRecords);
            }
        }
    }
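
    // For illustration (assumed example values, not part of the original file): a data file parsed by
    // the constructor above starts with one line of JSON for the serialized shard list, followed by one
    // comma-separated line per record in the order (shardId, sequenceNumber, partitionKey, data), e.g.:
    //
    //     {"shardList":[ ... shard descriptions ... ]}
    //     shardId-0,100,PK_shardId-0xyzabcdefg,somerandomdata
    //     shardId-0,103,PK_shardId-0qrstuvwxyz,otherrandomdata
    //
    // KinesisLocalFileDataCreator.generateTempDataFile(...) later in this commit writes files of
    // exactly this shape.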

    /*
     * (non-Javadoc)
     *
     * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy#getStreamInfo()
     */
    @Override
    public DescribeStreamResult getStreamInfo(String startShardId) throws ResourceNotFoundException {
        assert false : "getStreamInfo is not implemented.";
        return null;
    }

    @Override
    public Set<String> getAllShardIds() throws ResourceNotFoundException {
        Set<String> shardIds = new HashSet<String>();
        if (shardedDataRecords != null) {
            shardIds.addAll(shardedDataRecords.keySet());
        }

        return shardIds;
    }

    /**
     * Note, this method has package level access solely for testing purposes.
     */
    static String serializeIterator(String shardId, String sequenceNumber) {
        return String.format("%s%s%s", shardId, ITERATOR_DELIMITER, sequenceNumber);
    }

    /**
     * Container class for the return tuple of deserializeIterator.
     */
    // CHECKSTYLE:IGNORE VisibilityModifier FOR NEXT 10 LINES
    static class IteratorInfo {
        public String shardId;

        public String sequenceNumber;

        public IteratorInfo(String shardId, String sequenceNumber) {
            this.shardId = shardId;
            this.sequenceNumber = sequenceNumber;
        }
    }

    /**
     * Deserialize our iterator - used by test cases to inspect returned iterators.
     *
     * @param iterator
     * @return iteratorInfo
     */
    static IteratorInfo deserializeIterator(String iterator) {
        String[] splits = iterator.split(ITERATOR_DELIMITER);
        return new IteratorInfo(splits[0], splits[1]);
    }
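
    // For illustration (assumed values): an iterator produced by serializeIterator("shardId-0", "42")
    // is simply the string "shardId-0:42", and deserializeIterator splits it back into the same
    // shardId / sequence-number pair, which is what the unit tests inspect.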

    /*
     * (non-Javadoc)
     *
     * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy#getIterator(java.lang.String,
     * java.lang.String, java.lang.String)
     */
    @Override
    public String getIterator(String shardId, String iteratorEnum, String sequenceNumber)
        throws ResourceNotFoundException, InvalidArgumentException {
        /*
         * If we don't have records in this shard, any iterator will return the empty list. Using a
         * sequence number of 1 on an empty shard will give this behavior.
         */
        List<Record> shardRecords = shardedDataRecords.get(shardId);
        if (shardRecords == null) {
            throw new ResourceNotFoundException(shardId + " does not exist");
        }
        if (shardRecords.isEmpty()) {
            return serializeIterator(shardId, "1");
        }

        if (ShardIteratorType.LATEST.toString().equals(iteratorEnum)) {
            /*
             * If we do have records, LATEST should return an iterator that can be used to read the
             * last record. Our iterators are inclusive for convenience.
             */
            Record last = shardRecords.get(shardRecords.size() - 1);
            return serializeIterator(shardId, last.getSequenceNumber());
        } else if (ShardIteratorType.TRIM_HORIZON.toString().equals(iteratorEnum)) {
            return serializeIterator(shardId, shardRecords.get(0).getSequenceNumber());
        } else if (ShardIteratorType.AT_SEQUENCE_NUMBER.toString().equals(iteratorEnum)) {
            return serializeIterator(shardId, sequenceNumber);
        } else if (ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString().equals(iteratorEnum)) {
            BigInteger num = new BigInteger(sequenceNumber);
            num = num.add(BigInteger.ONE);
            return serializeIterator(shardId, num.toString());
        } else {
            throw new IllegalArgumentException("IteratorEnum value was invalid: " + iteratorEnum);
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy#get(java.nio.ByteBuffer, int)
     */
    @Override
    public GetRecordsResult get(String serializedKinesisIterator, int maxRecords)
        throws ResourceNotFoundException, InvalidArgumentException, ExpiredIteratorException {
        IteratorInfo iterator = deserializeIterator(serializedKinesisIterator);

        BigInteger startingPosition = new BigInteger(iterator.sequenceNumber);
        BigInteger lastRecordsSeqNo = BigInteger.ONE;
        List<Record> recordsToReturn = new ArrayList<Record>();
        List<Record> shardRecords = shardedDataRecords.get(iterator.shardId);
        if (shardRecords == null) {
            throw new ResourceNotFoundException(iterator.shardId + " does not exist");
        }

        boolean isHasMoreShards = false;

        for (int i = 0; i < shardRecords.size(); i++) {
            Record record = shardRecords.get(i);
            BigInteger recordSequenceNumber = new BigInteger(record.getSequenceNumber());
            // update lastRecordsSeqNo so if we return no records, it will be the seqNo of the last record.
            lastRecordsSeqNo = recordSequenceNumber;
            if (recordSequenceNumber.compareTo(startingPosition) >= 0) {
                // Set endIndex (of sublist) to cap at either maxRecords or end of list.
                int endIndex = Math.min(i + maxRecords, shardRecords.size());
                recordsToReturn.addAll(shardRecords.subList(i, endIndex));

                lastRecordsSeqNo = new BigInteger(shardRecords.get(endIndex - 1).getSequenceNumber());
                if (endIndex < shardRecords.size()) {
                    isHasMoreShards = true;
                }

                break;
            }
        }

        GetRecordsResult response = new GetRecordsResult();
        response.setRecords(recordsToReturn);

        // Set iterator only if the shard is not closed.
        if (isHasMoreShards || (!closedShards.contains(iterator.shardId))) {
            /*
             * Use the sequence number of the last record returned + 1 to compute the next iterator.
             */
            response.setNextShardIterator(serializeIterator(iterator.shardId, lastRecordsSeqNo.add(BigInteger.ONE)
                    .toString()));
            LOG.debug("Returning a non null iterator for shard " + iterator.shardId);
        } else {
            LOG.info("Returning null iterator for shard " + iterator.shardId);
        }

        return response;
    }
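
    // For illustration (assumed usage, mirroring what the unit tests in this commit do): a caller
    // drains one shard by pairing the two methods above, e.g.
    //
    //     String it = proxy.getIterator("shardId-0", ShardIteratorType.TRIM_HORIZON.toString(), null);
    //     while (it != null) {
    //         GetRecordsResult result = proxy.get(it, 2);
    //         // ... consume result.getRecords() ...
    //         it = result.getNextShardIterator(); // becomes null once a closed shard is exhausted
    //     }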

    /**
     * {@inheritDoc}
     */
    @Override
    public PutRecordResult put(String exclusiveMinimumSequenceNumber,
            String explicitHashKey,
            String partitionKey,
            ByteBuffer data) throws ResourceNotFoundException, InvalidArgumentException {
        PutRecordResult output = new PutRecordResult();

        BigInteger startingPosition = BigInteger.ONE;

        if (exclusiveMinimumSequenceNumber != null) {
            startingPosition = new BigInteger(exclusiveMinimumSequenceNumber).add(BigInteger.ONE);
        }

        output.setSequenceNumber(startingPosition.toString());
        return output;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Shard> getShardList() throws ResourceNotFoundException {
        List<Shard> shards = new LinkedList<Shard>();
        shards.addAll(shardList);
        return shards;
    }

    /**
     * Used for serializing/deserializing the shard list to the file.
     */
    public static class SerializedShardList {

        private List<Shard> shardList = new LinkedList<Shard>();

        /**
         * Public to enable Jackson object mapper serialization.
         */
        public SerializedShardList() {
        }

        /**
         * @param shardList List of shards for the stream.
         */
        public SerializedShardList(List<Shard> shardList) {
            this.shardList.addAll(shardList);
        }

        /**
         * public to enable Jackson object mapper serialization.
         *
         * @return shardList
         */
        public List<Shard> getShardList() {
            return shardList;
        }

        /**
         * public to enable Jackson object mapper deserialization.
         *
         * @param shardList List of shards
         */
        public void setShardList(List<Shard> shardList) {
            this.shardList = shardList;
        }
    }
}

@@ -0,0 +1,64 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.proxies;

import java.io.File;
import java.io.IOException;
import java.math.BigInteger;

import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;

/**
 * Factory for KinesisProxy objects that use a local file for data. Useful for testing purposes.
 */
public class KinesisLocalFileProxyFactory implements IKinesisProxyFactory {

    private static final int DEFAULT_NUM_SHARDS = 3;
    private static final String DEFAULT_SHARD_ID_PREFIX = "ShardId-";
    private static final int DEFAULT_NUM_RECORDS_PER_SHARD = 10;
    private static final BigInteger DEFAULT_STARTING_SEQUENCE_NUMBER = BigInteger.ZERO;

    private static final String DEFAULT_TEST_PROXY_FILE = "defaultKinesisProxyLocalFile";

    private IKinesisProxy testKinesisProxy;

    /**
     * @param fileName File to be used for stream data.
     *        If the file exists then it is expected to contain information for creating a test proxy object.
     *        If the file does not exist then a temporary file containing default values for a test proxy object
     *        will be created and used.
     * @throws IOException This will be thrown if we can't read/create the data file.
     */
    public KinesisLocalFileProxyFactory(String fileName) throws IOException {
        File f = new File(fileName);
        if (!f.exists()) {
            f = KinesisLocalFileDataCreator.generateTempDataFile(
                    DEFAULT_NUM_SHARDS, DEFAULT_SHARD_ID_PREFIX, DEFAULT_NUM_RECORDS_PER_SHARD,
                    DEFAULT_STARTING_SEQUENCE_NUMBER, DEFAULT_TEST_PROXY_FILE);
        }
        testKinesisProxy = new KinesisLocalFileProxy(f.getAbsolutePath());
    }

    /* (non-Javadoc)
     * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxyFactory#getProxy(java.lang.String)
     */
    @Override
    public IKinesisProxy getProxy(String streamARN) {
        return testKinesisProxy;
    }

}

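The factory above wires the local-file proxy into code that expects an IKinesisProxyFactory. A minimal usage sketch (assumed, illustrative values; exception handling omitted) looks like:

    IKinesisProxyFactory factory = new KinesisLocalFileProxyFactory("my-test-data-file");
    IKinesisProxy proxy = factory.getProxy("testStreamARN");
    System.out.println("Shards available: " + proxy.getShardList().size());

If the named file does not exist, the constructor first generates a default three-shard temp data file and backs the proxy with that.
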
@@ -0,0 +1,211 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.proxies.util;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
import com.amazonaws.services.kinesis.model.HashKeyRange;
import com.amazonaws.services.kinesis.model.SequenceNumberRange;
import com.amazonaws.services.kinesis.model.Shard;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * Temporary util class for generating data in a local file (used by KinesisLocalFileProxy).
 */
public class KinesisLocalFileDataCreator {

    /**
     * Partition key prefix - also referenced in KinesisLocalFileProxyTest.
     */
    public static final String PARTITION_KEY_PREFIX = "PK_";

    private static final String FILE_NAME_SUFFIX = ".dat";

    private static final long RAND_SEED_VALUE = 1092387456L;
    // Used to cap the size of the random "hole" in sequence numbers.
    private static final int NUM_BITS = 3;
    private static Random randomGenerator = new Random(RAND_SEED_VALUE);

    private static final int PARTITION_KEY_LENGTH = 10;
    private static final int DATA_LENGTH = 40;

    private KinesisLocalFileDataCreator() {
    }

    /**
     * Creates a temp file (in default temp file location) with fake Kinesis data records.
     * This method does not support resharding use cases.
     * @param numShards Number of shards
     * @param shardIdPrefix Prefix for shardIds (1, 2, ..., N will be added at the end to create shardIds)
     * @param numRecordsPerShard Number of records to generate per shard
     * @param startingSequenceNumber Sequence numbers in the generated data will be >= this number
     * @param fileNamePrefix Prefix of the filename
     * @return File created with the fake Kinesis records.
     * @throws IOException Thrown if there are issues creating the file.
     */
    public static File generateTempDataFile(
            int numShards,
            String shardIdPrefix,
            int numRecordsPerShard,
            BigInteger startingSequenceNumber,
            String fileNamePrefix)
        throws IOException {
        List<Shard> shardList = createShardList(numShards, shardIdPrefix, startingSequenceNumber);
        return generateTempDataFile(shardList, numRecordsPerShard, fileNamePrefix);
    }

    /**
     * Creates a temp file (in default temp file location) with fake Kinesis data records.
     * Records will be put in all shards.
     * @param fileNamePrefix Prefix for the name of the temp file
     * @param shardList List of shards (we use the shardId and sequenceNumberRange fields)
     * @param numRecordsPerShard Num records per shard (the shard sequenceNumberRange should be large enough
     *        for us to allow these many records with some "holes")
     * @return File with stream data filled in
     * @throws IOException Thrown if there are issues creating/updating the file
     */
    public static File generateTempDataFile(List<Shard> shardList, int numRecordsPerShard, String fileNamePrefix)
        throws IOException {
        File file = File.createTempFile(fileNamePrefix, FILE_NAME_SUFFIX);
        try (BufferedWriter fileWriter = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8))) {
            ObjectMapper objectMapper = new ObjectMapper();
            String serializedShardList =
                    objectMapper.writeValueAsString(new KinesisLocalFileProxy.SerializedShardList(shardList));
            fileWriter.write(serializedShardList);
            fileWriter.newLine();
            BigInteger sequenceNumberIncrement = new BigInteger("0");
            for (int i = 0; i < numRecordsPerShard; i++) {
                for (Shard shard : shardList) {
                    BigInteger sequenceNumber =
                            new BigInteger(shard.getSequenceNumberRange().getStartingSequenceNumber()).add(
                                    sequenceNumberIncrement);
                    String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber();
                    BigInteger maxSequenceNumber = KinesisLocalFileProxy.MAX_SEQUENCE_NUMBER;
                    if (endingSequenceNumber != null) {
                        maxSequenceNumber = new BigInteger(endingSequenceNumber);
                    }
                    if (maxSequenceNumber.compareTo(sequenceNumber) != 1) {
                        throw new IllegalArgumentException("Not enough space in shard");
                    }
                    String partitionKey =
                            PARTITION_KEY_PREFIX + shard.getShardId() + generateRandomString(PARTITION_KEY_LENGTH);
                    String data = generateRandomString(DATA_LENGTH);
                    String line = shard.getShardId() + "," + sequenceNumber + "," + partitionKey + "," + data;
                    fileWriter.write(line);
                    fileWriter.newLine();
                    sequenceNumberIncrement = sequenceNumberIncrement.add(BigInteger.ONE);
                    sequenceNumberIncrement = sequenceNumberIncrement.add(new BigInteger(NUM_BITS, randomGenerator));
                }
            }
        }
        return file;
    }

    /**
     * Helper method to create a list of shards (which can then be used to generate data files).
     * @param numShards Number of shards
     * @param shardIdPrefix Prefix for the shardIds
     * @param startingSequenceNumber Starting sequence number for all the shards
     * @return List of shards (with no reshard events).
     */
    public static List<Shard> createShardList(int numShards, String shardIdPrefix, BigInteger startingSequenceNumber) {
        List<Shard> shards = new ArrayList<Shard>(numShards);

        SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
        sequenceNumberRange.setStartingSequenceNumber(startingSequenceNumber.toString());
        sequenceNumberRange.setEndingSequenceNumber(null);
        BigInteger perShardHashKeyRange =
                KinesisLocalFileProxy.MAX_HASHKEY_VALUE.divide(new BigInteger(Integer.toString(numShards)));
        BigInteger hashKeyRangeStart = new BigInteger("0");
        for (int i = 0; i < numShards; i++) {
            Shard shard = new Shard();
            shard.setShardId(shardIdPrefix + i);
            shard.setSequenceNumberRange(sequenceNumberRange);
            BigInteger hashKeyRangeEnd = hashKeyRangeStart.add(perShardHashKeyRange);
            HashKeyRange hashKeyRange = new HashKeyRange();
            hashKeyRange.setStartingHashKey(hashKeyRangeStart.toString());
            hashKeyRange.setEndingHashKey(hashKeyRangeEnd.toString());
            // Attach the computed hash key range to the shard and advance the start for the next shard.
            shard.setHashKeyRange(hashKeyRange);
            shards.add(shard);
            hashKeyRangeStart = hashKeyRangeEnd.add(BigInteger.ONE);
        }

        return shards;
    }

    /**
     * Generates a random string of specified length.
     * @param length String of this length will be generated
     * @return Random generated string
     */
    private static String generateRandomString(int length) {
        StringBuffer str = new StringBuffer();
        final int startingCharAsciiValue = 97;
        final int numChars = 26;
        for (int i = 0; i < length; i++) {
            str.append((char) (randomGenerator.nextInt(numChars - 1) + startingCharAsciiValue));
        }
        return str.toString();
    }

    /**
     * Creates a new temp file populated with fake Kinesis data records.
     * @param args Expects 5 args: numShards, shardPrefix, numRecordsPerShard, startingSequenceNumber, fileNamePrefix
     */
    // CHECKSTYLE:OFF MagicNumber
    // CHECKSTYLE:IGNORE UncommentedMain FOR NEXT 2 LINES
    public static void main(String[] args) {
        int numShards = 1;
        String shardIdPrefix = "shardId";
        int numRecordsPerShard = 17;
        BigInteger startingSequenceNumber = new BigInteger("99");
        String fileNamePrefix = "kinesisFakeRecords";

        try {
            if ((args.length != 0) && (args.length != 5)) {
                // Temporary util code, so not providing detailed usage feedback.
                System.out.println("Unexpected number of arguments.");
                System.exit(0);
            }

            if (args.length == 5) {
                numShards = Integer.parseInt(args[0]);
                shardIdPrefix = args[1];
                numRecordsPerShard = Integer.parseInt(args[2]);
                startingSequenceNumber = new BigInteger(args[3]);
                fileNamePrefix = args[4];
            }

            File file = KinesisLocalFileDataCreator.generateTempDataFile(
                    numShards,
                    shardIdPrefix,
                    numRecordsPerShard,
                    startingSequenceNumber,
                    fileNamePrefix);
            System.out.println("Created fake kinesis records in file: " + file.getAbsolutePath());
        } catch (Exception e) {
            // CHECKSTYLE:IGNORE IllegalCatch FOR NEXT -1 LINES
            System.out.println("Caught Exception: " + e);
        }

    }
    // CHECKSTYLE:ON MagicNumber

}

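Taken together, the two utility classes above give the unit tests an offline stand-in for Kinesis. A minimal end-to-end sketch (illustrative only; counts and names are arbitrary, exception handling omitted) of how the tests in this commit use them:

    List<Shard> shards = KinesisLocalFileDataCreator.createShardList(2, "shardId-", BigInteger.ONE);
    File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 10, "sketch");
    IKinesisProxy proxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath());
    String iterator = proxy.getIterator("shardId-0", ShardIteratorType.TRIM_HORIZON.toString(), null);
    GetRecordsResult batch = proxy.get(iterator, 5);
    dataFile.delete();

This mirrors the setup performed by runAndTestWorker and the shutdown tests earlier in this diff.
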
@@ -0,0 +1,18 @@
package com.amazonaws.services.kinesis.clientlibrary.types;

import org.junit.Assert;
import org.junit.Test;

/**
 * Unit tests of ShutdownReason enum class.
 */
public class ShutdownReasonTest {
    @Test
    public void testToString() {
        Assert.assertEquals("ZOMBIE", String.valueOf(ShutdownReason.ZOMBIE));
        Assert.assertEquals("TERMINATE", String.valueOf(ShutdownReason.TERMINATE));
        Assert.assertEquals("ZOMBIE", ShutdownReason.ZOMBIE.toString());
        Assert.assertEquals("TERMINATE", ShutdownReason.TERMINATE.toString());
    }

}

@@ -0,0 +1,213 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.leases.impl;

import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.swing.*;

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory;

public class LeaseCoordinatorExerciser {

    private static final Log LOG = LogFactory.getLog(LeaseCoordinatorExerciser.class);

    public static void main(String[] args)
        throws InterruptedException, DependencyException, InvalidStateException, ProvisionedThroughputException,
        IOException {

        int numCoordinators = 9;
        int numLeases = 73;
        int leaseDurationMillis = 10000;
        int epsilonMillis = 100;

        AWSCredentialsProvider creds =
                new DefaultAWSCredentialsProviderChain();
        AmazonDynamoDBClient ddb = new AmazonDynamoDBClient(creds);

        ILeaseManager<KinesisClientLease> leaseManager = new KinesisClientLeaseManager("nagl_ShardProgress", ddb);

        if (leaseManager.createLeaseTableIfNotExists(10L, 50L)) {
            LOG.info("Waiting for newly created lease table");
            if (!leaseManager.waitUntilLeaseTableExists(10, 300)) {
                LOG.error("Table was not created in time");
                return;
            }
        }

        CWMetricsFactory metricsFactory = new CWMetricsFactory(creds, "testNamespace", 30 * 1000, 1000);
        final List<LeaseCoordinator<KinesisClientLease>> coordinators =
                new ArrayList<LeaseCoordinator<KinesisClientLease>>();
        for (int i = 0; i < numCoordinators; i++) {
            String workerIdentifier = "worker-" + Integer.toString(i);

            LeaseCoordinator<KinesisClientLease> coord = new LeaseCoordinator<KinesisClientLease>(leaseManager,
                    workerIdentifier,
                    leaseDurationMillis,
                    epsilonMillis,
                    metricsFactory);

            coordinators.add(coord);
        }

        leaseManager.deleteAll();

        for (int i = 0; i < numLeases; i++) {
            KinesisClientLease lease = new KinesisClientLease();
            lease.setLeaseKey(Integer.toString(i));
            lease.setCheckpoint(new ExtendedSequenceNumber("checkpoint"));
            leaseManager.createLeaseIfNotExists(lease);
        }

        final JFrame frame = new JFrame("Test Visualizer");
        frame.setPreferredSize(new Dimension(800, 600));
        final JPanel panel = new JPanel(new GridLayout(coordinators.size() + 1, 0));
        final JLabel ticker = new JLabel("tick");
        panel.add(ticker);
        frame.getContentPane().add(panel);

        final Map<String, JLabel> labels = new HashMap<String, JLabel>();
        for (final LeaseCoordinator<KinesisClientLease> coord : coordinators) {
            JPanel coordPanel = new JPanel();
            coordPanel.setLayout(new BoxLayout(coordPanel, BoxLayout.X_AXIS));
            final Button button = new Button("Stop " + coord.getWorkerIdentifier());
            button.setMaximumSize(new Dimension(200, 50));
            button.addActionListener(new ActionListener() {

                @Override
                public void actionPerformed(ActionEvent arg0) {
                    if (coord.isRunning()) {
                        coord.stop();
                        button.setLabel("Start " + coord.getWorkerIdentifier());
                    } else {
                        try {
                            coord.start();
                        } catch (LeasingException e) {
                            LOG.error(e);
                        }
                        button.setLabel("Stop " + coord.getWorkerIdentifier());
                    }
                }

            });
            coordPanel.add(button);

            JLabel label = new JLabel();
            coordPanel.add(label);
            labels.put(coord.getWorkerIdentifier(), label);
            panel.add(coordPanel);
        }

        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        new Thread() {

            // Key is lease key, value is green-ness as a value from 0 to 255.
            // Great variable name, huh?
            private Map<String, Integer> greenNesses = new HashMap<String, Integer>();

            // Key is lease key, value is last owning worker
            private Map<String, String> lastOwners = new HashMap<String, String>();

            @Override
            public void run() {
                while (true) {
                    for (LeaseCoordinator<KinesisClientLease> coord : coordinators) {
                        String workerIdentifier = coord.getWorkerIdentifier();

                        JLabel label = labels.get(workerIdentifier);

                        List<KinesisClientLease> asgn = new ArrayList<KinesisClientLease>(coord.getAssignments());
                        Collections.sort(asgn, new Comparator<KinesisClientLease>() {

                            @Override
                            public int compare(KinesisClientLease arg0, KinesisClientLease arg1) {
                                return arg0.getLeaseKey().compareTo(arg1.getLeaseKey());
                            }

                        });

                        StringBuilder builder = new StringBuilder();
                        builder.append("<html>");
                        builder.append(workerIdentifier).append(":").append(asgn.size()).append(" ");

                        for (KinesisClientLease lease : asgn) {
                            String leaseKey = lease.getLeaseKey();
                            String lastOwner = lastOwners.get(leaseKey);

                            // Color things green when they switch owners, decay the green-ness over time.
                            Integer greenNess = greenNesses.get(leaseKey);
                            if (greenNess == null || lastOwner == null || !lastOwner.equals(lease.getLeaseOwner())) {
                                greenNess = 200;
                            } else {
                                greenNess = Math.max(0, greenNess - 20);
                            }
                            greenNesses.put(leaseKey, greenNess);
                            lastOwners.put(leaseKey, lease.getLeaseOwner());

                            builder.append(String.format("<font color=\"%s\">%03d</font>",
                                    String.format("#00%02x00", greenNess),
                                    Integer.parseInt(leaseKey))).append(" ");
                        }
                        builder.append("</html>");

                        label.setText(builder.toString());
|
||||||
|
label.revalidate();
|
||||||
|
label.repaint();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ticker.getText().equals("tick")) {
|
||||||
|
ticker.setText("tock");
|
||||||
|
} else {
|
||||||
|
ticker.setText("tick");
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
Thread.sleep(200);
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
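// Ignore interruptions; the visualizer loop simply resumes polling.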
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}.start();
|
||||||
|
|
||||||
|
frame.pack();
|
||||||
|
frame.setVisible(true);
|
||||||
|
|
||||||
|
for (LeaseCoordinator<KinesisClientLease> coord : coordinators) {
|
||||||
|
coord.start();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
@ -0,0 +1,74 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.leases.impl;

import java.util.logging.Logger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.rules.TestWatcher;
import org.junit.runner.Description;

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;

@Ignore
public class LeaseIntegrationTest {

    protected static KinesisClientLeaseManager leaseManager;
    protected static AmazonDynamoDBClient ddbClient =
            new AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain());

    private static final Log LOG = LogFactory.getLog(LeaseIntegrationTest.class);

    @Rule
    public TestWatcher watcher = new TestWatcher() {

        @Override
        protected void starting(Description description) {
            if (leaseManager == null) {
                // Do some static setup once per class.

                leaseManager = new KinesisClientLeaseManager("nagl_ShardProgress", ddbClient, true);

                MetricsHelper.startScope(new NullMetricsFactory());
            }

            try {
                if (!leaseManager.leaseTableExists()) {
                    LOG.info("Creating lease table");
                    leaseManager.createLeaseTableIfNotExists(10L, 10L);

                    leaseManager.waitUntilLeaseTableExists(10, 500);
                }

                LOG.info("Beginning test case " + description.getMethodName());
                for (KinesisClientLease lease : leaseManager.listLeases()) {
                    leaseManager.deleteLease(lease);
                }
            } catch (Exception e) {
                String message =
                        "Test case " + description.getMethodName() + " fails because of exception during init: " + e;
                LOG.error(message);
                throw new RuntimeException(message, e);
            }
        }
    };

}

@ -0,0 +1,268 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.leases.impl;
|
||||||
|
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.concurrent.atomic.AtomicInteger;
|
||||||
|
|
||||||
|
import junit.framework.Assert;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
|
||||||
|
|
||||||
|
public class LeaseManagerIntegrationTest extends LeaseIntegrationTest {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Test listLeases when no records are present.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testListNoRecords() throws LeasingException {
|
||||||
|
List<KinesisClientLease> leases = leaseManager.listLeases();
|
||||||
|
Assert.assertTrue(leases.isEmpty());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests listLeases when records are present. Exercise dynamo's paging functionality.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testListWithRecords() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
int numRecordsToPut = 10;
|
||||||
|
|
||||||
|
for (int i = 0; i < numRecordsToPut; i++) {
|
||||||
|
builder.withLease(Integer.toString(i));
|
||||||
|
}
|
||||||
|
|
||||||
|
Collection<KinesisClientLease> expected = builder.build().values();
|
||||||
|
|
||||||
|
// The / 3 here ensures that we will test Dynamo's paging mechanics.
|
||||||
|
List<KinesisClientLease> actual = leaseManager.list(numRecordsToPut / 3);
|
||||||
|
|
||||||
|
for (KinesisClientLease lease : actual) {
|
||||||
|
Assert.assertTrue(expected.remove(lease));
|
||||||
|
}
|
||||||
|
|
||||||
|
Assert.assertTrue(expected.isEmpty());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests getLease when a record is present.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testGetLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
Lease expected = builder.withLease("1").build().get("1");
|
||||||
|
|
||||||
|
Lease actual = leaseManager.getLease(expected.getLeaseKey());
|
||||||
|
Assert.assertEquals(expected, actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests leaseManager.getLease() when the looked-for record is absent.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testGetNull() throws LeasingException {
|
||||||
|
Lease actual = leaseManager.getLease("bogusShardId");
|
||||||
|
Assert.assertNull(actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests leaseManager.renewLease's success scenario.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testRenewLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1").build().get("1");
|
||||||
|
Long originalLeaseCounter = lease.getLeaseCounter();
|
||||||
|
|
||||||
|
leaseManager.renewLease(lease);
|
||||||
|
Assert.assertTrue(originalLeaseCounter + 1 == lease.getLeaseCounter());
|
||||||
|
|
||||||
|
Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());
|
||||||
|
|
||||||
|
Assert.assertEquals(lease, fromDynamo);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests leaseManager.renewLease when the lease has changed out from under us.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testHoldUpdatedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1").build().get("1");
|
||||||
|
|
||||||
|
KinesisClientLease leaseCopy = leaseManager.getLease(lease.getLeaseKey());
|
||||||
|
|
||||||
|
leaseManager.renewLease(lease);
|
||||||
|
|
||||||
|
Assert.assertFalse(leaseManager.renewLease(leaseCopy));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests takeLease when the lease is not already owned.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testTakeUnownedLease() throws LeasingException {
|
||||||
|
testTakeLease(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests takeLease when the lease is already owned.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testTakeOwnedLease() throws LeasingException {
|
||||||
|
testTakeLease(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
private void testTakeLease(boolean owned) throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1", owned ? "originalOwner" : null).build().get("1");
|
||||||
|
Long originalLeaseCounter = lease.getLeaseCounter();
|
||||||
|
|
||||||
|
String newOwner = "newOwner";
|
||||||
|
leaseManager.takeLease(lease, newOwner);
|
||||||
|
Assert.assertTrue(originalLeaseCounter + 1 == lease.getLeaseCounter());
|
||||||
|
Assert.assertTrue((owned ? 1 : 0) == lease.getOwnerSwitchesSinceCheckpoint());
|
||||||
|
Assert.assertEquals(newOwner, lease.getLeaseOwner());
|
||||||
|
|
||||||
|
Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());
|
||||||
|
|
||||||
|
Assert.assertEquals(lease, fromDynamo);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests takeLease when the lease has changed out from under us.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testTakeUpdatedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1").build().get("1");
|
||||||
|
|
||||||
|
KinesisClientLease leaseCopy = leaseManager.getLease(lease.getLeaseKey());
|
||||||
|
|
||||||
|
String newOwner = "newOwner";
|
||||||
|
leaseManager.takeLease(lease, newOwner);
|
||||||
|
|
||||||
|
Assert.assertFalse(leaseManager.takeLease(leaseCopy, newOwner));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests evictLease when the lease is currently unowned.
|
||||||
|
*/
|
||||||
|
@Test
public void testEvictUnownedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1", null).build().get("1");
|
||||||
|
|
||||||
|
Assert.assertFalse(leaseManager.evictLease(lease));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests evictLease when the lease is currently owned.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testEvictOwnedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1").build().get("1");
|
||||||
|
Long originalLeaseCounter = lease.getLeaseCounter();
|
||||||
|
|
||||||
|
leaseManager.evictLease(lease);
|
||||||
|
Assert.assertNull(lease.getLeaseOwner());
|
||||||
|
Assert.assertTrue(originalLeaseCounter + 1 == lease.getLeaseCounter());
|
||||||
|
|
||||||
|
Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey());
|
||||||
|
|
||||||
|
Assert.assertEquals(lease, fromDynamo);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests evictLease when the lease has changed out from under us. Note that evicting leases
|
||||||
|
* is conditional on the lease owner, unlike everything else which is conditional on the
|
||||||
|
* lease counter.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testEvictChangedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1").build().get("1");
|
||||||
|
|
||||||
|
// Change the owner only - this should cause our optimistic lock to fail.
|
||||||
|
lease.setLeaseOwner("otherOwner");
|
||||||
|
Assert.assertFalse(leaseManager.evictLease(lease));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests deleteLease when a lease exists.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testDeleteLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
KinesisClientLease lease = builder.withLease("1").build().get("1");
|
||||||
|
|
||||||
|
leaseManager.deleteLease(lease);
|
||||||
|
|
||||||
|
KinesisClientLease newLease = leaseManager.getLease(lease.getLeaseKey());
|
||||||
|
Assert.assertNull(newLease);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests deleteLease when a lease does not exist.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testDeleteNonexistentLease() throws LeasingException {
|
||||||
|
KinesisClientLease lease = new KinesisClientLease();
|
||||||
|
lease.setLeaseKey("1");
|
||||||
|
// The lease has not been written to DDB - try to delete it and expect success.
|
||||||
|
|
||||||
|
leaseManager.deleteLease(lease);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testWaitUntilLeaseTableExists() throws LeasingException {
|
||||||
|
KinesisClientLeaseManager manager = new KinesisClientLeaseManager("nagl_ShardProgress", ddbClient, true) {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
long sleep(long timeToSleepMillis) {
|
||||||
|
Assert.fail("Should not sleep");
|
||||||
|
return 0L;
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
Assert.assertTrue(manager.waitUntilLeaseTableExists(1, 1));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testWaitUntilLeaseTableExistsTimeout() throws LeasingException {
|
||||||
|
/*
|
||||||
|
* Just using AtomicInteger for the indirection it provides.
|
||||||
|
*/
|
||||||
|
final AtomicInteger sleepCounter = new AtomicInteger(0);
|
||||||
|
KinesisClientLeaseManager manager = new KinesisClientLeaseManager("nonexistentTable", ddbClient, true) {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
long sleep(long timeToSleepMillis) {
|
||||||
|
Assert.assertEquals(1000L, timeToSleepMillis);
|
||||||
|
sleepCounter.incrementAndGet();
|
||||||
|
return 1000L;
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
Assert.assertFalse(manager.waitUntilLeaseTableExists(2, 1));
|
||||||
|
Assert.assertEquals(1, sleepCounter.get());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,250 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.leases.impl;
|
||||||
|
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer;
|
||||||
|
|
||||||
|
public class LeaseRenewerIntegrationTest extends LeaseIntegrationTest {
|
||||||
|
|
||||||
|
// This test case's leases last 2 seconds
|
||||||
|
private static final long LEASE_DURATION_MILLIS = 2000L;
|
||||||
|
|
||||||
|
private ILeaseRenewer<KinesisClientLease> renewer;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp() {
|
||||||
|
renewer = new LeaseRenewer<KinesisClientLease>(
|
||||||
|
leaseManager, "foo", LEASE_DURATION_MILLIS, Executors.newCachedThreadPool());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSimpleRenew() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLeaseLoss() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").withLease("2", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1", "2");
|
||||||
|
KinesisClientLease renewedLease = builder.renewMutateAssert(renewer, "1", "2").get("2");
|
||||||
|
|
||||||
|
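// Update lease "2" directly through the lease manager so its counter moves past the renewer's copy and the next renewal drops it.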
leaseManager.updateLease(renewedLease);
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testClear() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
renewer.clearCurrentlyHeldLeases();
|
||||||
|
builder.renewMutateAssert(renewer);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testGetCurrentlyHeldLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
// this should be a copy that doesn't get updated
|
||||||
|
KinesisClientLease lease = renewer.getCurrentlyHeldLease("1");
|
||||||
|
Assert.assertEquals((Long) 1L, lease.getLeaseCounter());
|
||||||
|
|
||||||
|
// do one renewal and make sure the old copy doesn't get updated
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
Assert.assertEquals((Long) 1L, lease.getLeaseCounter());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testGetCurrentlyHeldLeases() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
KinesisClientLease lease2 = builder.withLease("1", "foo").withLease("2", "foo").build().get("2");
|
||||||
|
builder.addLeasesToRenew(renewer, "1", "2");
|
||||||
|
builder.renewMutateAssert(renewer, "1", "2");
|
||||||
|
|
||||||
|
// This should be a copy that doesn't get updated
|
||||||
|
Map<String, KinesisClientLease> heldLeases = renewer.getCurrentlyHeldLeases();
|
||||||
|
Assert.assertEquals(2, heldLeases.size());
|
||||||
|
Assert.assertEquals((Long) 1L, heldLeases.get("1").getLeaseCounter());
|
||||||
|
Assert.assertEquals((Long) 1L, heldLeases.get("2").getLeaseCounter());
|
||||||
|
|
||||||
|
leaseManager.updateLease(lease2); // lose lease 2
|
||||||
|
// Do another renewal and make sure the copy doesn't change
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
Assert.assertEquals(2, heldLeases.size());
|
||||||
|
Assert.assertEquals((Long) 1L, heldLeases.get("1").getLeaseCounter());
|
||||||
|
Assert.assertEquals((Long) 1L, heldLeases.get("2").getLeaseCounter());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUpdateLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
KinesisClientLease expected = renewer.getCurrentlyHeldLease("1");
|
||||||
|
expected.setCheckpoint(new ExtendedSequenceNumber("new checkpoint"));
|
||||||
|
Assert.assertTrue(renewer.updateLease(expected, expected.getConcurrencyToken()));
|
||||||
|
|
||||||
|
// Assert that the counter and data have changed immediately after the update...
|
||||||
|
KinesisClientLease actual = renewer.getCurrentlyHeldLease("1");
|
||||||
|
expected.setLeaseCounter(expected.getLeaseCounter() + 1);
|
||||||
|
Assert.assertEquals(expected, actual);
|
||||||
|
|
||||||
|
// ...and after another round of renewal
|
||||||
|
renewer.renewLeases();
|
||||||
|
actual = renewer.getCurrentlyHeldLease("1");
|
||||||
|
expected.setLeaseCounter(expected.getLeaseCounter() + 1);
|
||||||
|
Assert.assertEquals(expected, actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUpdateLostLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
KinesisClientLease lease = renewer.getCurrentlyHeldLease("1");
|
||||||
|
|
||||||
|
// cause lease loss such that the renewer doesn't realize he's lost the lease when update is called
|
||||||
|
leaseManager.renewLease(lease);
|
||||||
|
|
||||||
|
// renewer still thinks he has the lease
|
||||||
|
Assert.assertNotNull(renewer.getCurrentlyHeldLease("1"));
|
||||||
|
lease.setCheckpoint(new ExtendedSequenceNumber("new checkpoint"));
|
||||||
|
|
||||||
|
// update fails
|
||||||
|
Assert.assertFalse(renewer.updateLease(lease, lease.getConcurrencyToken()));
|
||||||
|
// renewer no longer thinks he has the lease
|
||||||
|
Assert.assertNull(renewer.getCurrentlyHeldLease("1"));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUpdateOldLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
KinesisClientLease lease = renewer.getCurrentlyHeldLease("1");
|
||||||
|
|
||||||
|
// cause lease loss such that the renewer knows the lease has been lost when update is called
|
||||||
|
leaseManager.renewLease(lease);
|
||||||
|
builder.renewMutateAssert(renewer);
|
||||||
|
|
||||||
|
lease.setCheckpoint(new ExtendedSequenceNumber("new checkpoint"));
|
||||||
|
Assert.assertFalse(renewer.updateLease(lease, lease.getConcurrencyToken()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUpdateRegainedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
KinesisClientLease lease = renewer.getCurrentlyHeldLease("1");
|
||||||
|
|
||||||
|
// cause lease loss such that the renewer knows the lease has been lost when update is called
|
||||||
|
leaseManager.renewLease(lease);
|
||||||
|
builder.renewMutateAssert(renewer);
|
||||||
|
|
||||||
|
// regain the lease
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
|
||||||
|
lease.setCheckpoint(new ExtendedSequenceNumber("new checkpoint"));
|
||||||
|
Assert.assertFalse(renewer.updateLease(lease, lease.getConcurrencyToken()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testIgnoreNoRenewalTimestamp() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
KinesisClientLease lease = builder.withLease("1", "foo").build().get("1");
|
||||||
|
lease.setLastCounterIncrementNanos(null);
|
||||||
|
|
||||||
|
renewer.addLeasesToRenew(Collections.singleton(lease));
|
||||||
|
|
||||||
|
Assert.assertEquals(0, renewer.getCurrentlyHeldLeases().size());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLeaseTimeout() throws LeasingException, InterruptedException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "foo").build();
|
||||||
|
|
||||||
|
builder.addLeasesToRenew(renewer, "1");
|
||||||
|
builder.renewMutateAssert(renewer, "1");
|
||||||
|
|
||||||
|
// TODO: Worth eliminating this sleep using the same pattern we used on LeaseTaker?
|
||||||
|
Thread.sleep(LEASE_DURATION_MILLIS); // Wait for the lease to timeout
|
||||||
|
|
||||||
|
Assert.assertEquals(0, renewer.getCurrentlyHeldLeases().size());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testInitialize() throws LeasingException {
|
||||||
|
final String shardId = "shd-0-0";
|
||||||
|
final String owner = "foo:8000";
|
||||||
|
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
builder.withLease(shardId, owner);
|
||||||
|
Map<String, KinesisClientLease> leases = builder.build();
|
||||||
|
LeaseRenewer<KinesisClientLease> renewer = new LeaseRenewer<KinesisClientLease>(
|
||||||
|
leaseManager, owner, 30000L, Executors.newCachedThreadPool());
|
||||||
|
renewer.initialize();
|
||||||
|
Map<String, KinesisClientLease> heldLeases = renewer.getCurrentlyHeldLeases();
|
||||||
|
Assert.assertEquals(leases.size(), heldLeases.size());
|
||||||
|
Assert.assertEquals(leases.keySet(), heldLeases.keySet());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,129 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.leases.impl;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.UUID;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
|
||||||
|
|
||||||
|
public class LeaseRenewerTest {
|
||||||
|
|
||||||
|
ILeaseManager<Lease> leaseManager;
|
||||||
|
String workerIdentifier;
|
||||||
|
long leaseDurationMillis;
|
||||||
|
ExecutorService leaseRenewalExecService;
|
||||||
|
LeaseRenewer<Lease> renewer;
|
||||||
|
List<Lease> leasesToRenew;
|
||||||
|
|
||||||
|
private static Lease newLease(String leaseKey,
|
||||||
|
String leaseOwner,
|
||||||
|
Long leaseCounter,
|
||||||
|
UUID concurrencyToken,
|
||||||
|
Long lastCounterIncrementNanos) {
|
||||||
|
Lease lease = new Lease();
|
||||||
|
lease.setLeaseKey(leaseKey);
|
||||||
|
lease.setLeaseOwner(leaseOwner);
|
||||||
|
lease.setLeaseCounter(leaseCounter);
|
||||||
|
lease.setConcurrencyToken(concurrencyToken);
|
||||||
|
lease.setLastCounterIncrementNanos(lastCounterIncrementNanos);
|
||||||
|
return lease;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static Lease newLease(String leaseKey) {
|
||||||
|
return newLease(leaseKey, "leaseOwner", 0L, UUID.randomUUID(), System.nanoTime());
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Before
|
||||||
|
public void before() {
|
||||||
|
leaseManager = Mockito.mock(ILeaseManager.class);
|
||||||
|
workerIdentifier = "workerId";
|
||||||
|
leaseDurationMillis = 10000;
|
||||||
|
leaseRenewalExecService = Executors.newSingleThreadExecutor();
|
||||||
|
leasesToRenew = null;
|
||||||
|
renewer = new LeaseRenewer<>(leaseManager,
|
||||||
|
workerIdentifier,
|
||||||
|
leaseDurationMillis,
|
||||||
|
Executors.newCachedThreadPool());
|
||||||
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void after() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
|
||||||
|
if (leasesToRenew == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
for (Lease l : leasesToRenew) {
|
||||||
|
Mockito.verify(leaseManager, Mockito.times(1)).renewLease(l);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLeaseRenewerHoldsGoodLeases()
|
||||||
|
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
|
||||||
|
/*
|
||||||
|
* Prepare leases to be renewed
|
||||||
|
* 2 Good
|
||||||
|
*/
|
||||||
|
Lease lease1 = newLease("1");
|
||||||
|
Lease lease2 = newLease("2");
|
||||||
|
leasesToRenew = Arrays.asList(lease1, lease2);
|
||||||
|
renewer.addLeasesToRenew(leasesToRenew);
|
||||||
|
|
||||||
|
Mockito.doReturn(true).when(leaseManager).renewLease(lease1);
|
||||||
|
Mockito.doReturn(true).when(leaseManager).renewLease(lease2);
|
||||||
|
|
||||||
|
renewer.renewLeases();
|
||||||
|
|
||||||
|
Assert.assertEquals(2, renewer.getCurrentlyHeldLeases().size());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLeaseRenewerDoesNotRenewExpiredLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
|
||||||
|
String leaseKey = "expiredLease";
|
||||||
|
long initialCounterIncrementNanos = 5L; // "expired" time.
|
||||||
|
Lease lease1 = newLease(leaseKey);
|
||||||
|
lease1.setLastCounterIncrementNanos(initialCounterIncrementNanos);
|
||||||
|
|
||||||
|
leasesToRenew = new ArrayList<>();
|
||||||
|
leasesToRenew.add(lease1);
|
||||||
|
Mockito.doReturn(true).when(leaseManager).renewLease(lease1);
|
||||||
|
renewer.addLeasesToRenew(leasesToRenew);
|
||||||
|
|
||||||
|
Assert.assertTrue(lease1.isExpired(1, System.nanoTime()));
|
||||||
|
Assert.assertNull(renewer.getCurrentlyHeldLease(leaseKey));
|
||||||
|
renewer.renewLeases();
|
||||||
|
// Don't renew lease(s) with same key if getCurrentlyHeldLease returned null previously
|
||||||
|
Assert.assertNull(renewer.getCurrentlyHeldLease(leaseKey));
|
||||||
|
Assert.assertFalse(renewer.getCurrentlyHeldLeases().containsKey(leaseKey));
|
||||||
|
|
||||||
|
// Clear the list to avoid triggering expectation mismatch in after().
|
||||||
|
leasesToRenew.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,164 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.leases.impl;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
|
||||||
|
|
||||||
|
public class LeaseTakerIntegrationTest extends LeaseIntegrationTest {
|
||||||
|
|
||||||
|
private static final long LEASE_DURATION_MILLIS = 1000L;
|
||||||
|
private LeaseTaker<KinesisClientLease> taker;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp() {
|
||||||
|
taker = new LeaseTaker<KinesisClientLease>(leaseManager, "foo", LEASE_DURATION_MILLIS);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testSimpleLeaseTake() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", null).build();
|
||||||
|
|
||||||
|
builder.takeMutateAssert(taker, "1");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testNotTakeUpdatedLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "bar").build();
|
||||||
|
|
||||||
|
builder.takeMutateAssert(taker); // do a first scan to learn the state of the world
|
||||||
|
builder.renewAllLeases(); // renew leases
|
||||||
|
builder.passTime(LEASE_DURATION_MILLIS + 1);
|
||||||
|
|
||||||
|
builder.takeMutateAssert(taker); // do a second scan
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testTakeOwnLease() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", taker.getWorkerIdentifier()).build();
|
||||||
|
|
||||||
|
builder.takeMutateAssert(taker); // do a first scan to learn the state of the world
|
||||||
|
builder.passTime(LEASE_DURATION_MILLIS + 1);
|
||||||
|
builder.takeMutateAssert(taker, "1"); // do a second scan, assert that we didn't take anything
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testNotTakeNewOwnedLease() throws LeasingException, InterruptedException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "bar").build();
|
||||||
|
|
||||||
|
builder.takeMutateAssert(taker); // This should not take anything because the lease is new and owned.
|
||||||
|
builder.passTime(LEASE_DURATION_MILLIS + 1);
|
||||||
|
|
||||||
|
// This should take because the lease is old
|
||||||
|
builder.takeMutateAssert(taker, "1");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify that we take leases non-greedily by setting up an environment where there are 4 leases and 2 workers,
|
||||||
|
* only one of which holds a lease. This leaves 3 free leases, but LeaseTaker should decide it needs 2 leases and
|
||||||
|
* only take 2.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testNonGreedyTake() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
for (int i = 0; i < 3; i++) {
|
||||||
|
builder.withLease(Integer.toString(i), null);
|
||||||
|
}
|
||||||
|
|
||||||
|
builder.withLease("4", "bar").build();
|
||||||
|
|
||||||
|
builder.takeMutateAssert(taker, 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify that LeaseTaker does not steal when it's only short 1 lease and the other worker is at target. Set up a
|
||||||
|
* scenario where there are 4 leases held by two servers, and a third server with one lease. The third server should
|
||||||
|
* not steal.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testNoStealWhenOffByOne() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "bar")
|
||||||
|
.withLease("2", "bar")
|
||||||
|
.withLease("3", "baz")
|
||||||
|
.withLease("4", "baz")
|
||||||
|
.withLease("5", "foo")
|
||||||
|
.build();
|
||||||
|
|
||||||
|
// Takes nothing since all leases are new and owned and we won't steal if we're short by 1.
|
||||||
|
builder.takeMutateAssert(taker);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify that one lease is stolen from the highest loaded server when a server needs more than one lease and no
* expired leases are available. Setup: 6 leases, server foo holds 0, bar holds 1, baz holds 5.
|
||||||
|
*
|
||||||
|
* Foo should steal from baz.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testSteal() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", "bar");
|
||||||
|
for (int i = 2; i <= 6; i++) {
|
||||||
|
String shardId = Integer.toString(i);
|
||||||
|
builder.withLease(shardId, "baz");
|
||||||
|
}
|
||||||
|
|
||||||
|
builder.build();
|
||||||
|
|
||||||
|
// Assert that one lease was stolen from baz.
|
||||||
|
Map<String, KinesisClientLease> takenLeases = builder.takeMutateAssert(taker, 1);
|
||||||
|
|
||||||
|
// Assert that it was one of baz's leases (shardId != 1)
|
||||||
|
String shardIdStolen = takenLeases.keySet().iterator().next();
|
||||||
|
Assert.assertFalse(shardIdStolen.equals("1"));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Verify that stealing does not happen if LeaseTaker takes at least one expired lease, even if it needs more than
|
||||||
|
* one.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testNoStealWhenExpiredLeases() throws LeasingException {
|
||||||
|
TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager);
|
||||||
|
|
||||||
|
builder.withLease("1", null);
|
||||||
|
for (int i = 2; i <= 4; i++) {
|
||||||
|
String shardId = Integer.toString(i);
|
||||||
|
builder.withLease(shardId, "bar");
|
||||||
|
}
|
||||||
|
|
||||||
|
builder.build();
|
||||||
|
|
||||||
|
// Assert that the unowned lease was taken and we did not steal anything from bar
|
||||||
|
builder.takeMutateAssert(taker, "1");
|
||||||
|
}
|
||||||
|
}
@ -0,0 +1,75 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.leases.impl;

import java.util.ArrayList;
import java.util.List;

import junit.framework.Assert;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 *
 */
public class LeaseTakerTest {

    /**
     * @throws java.lang.Exception
     */
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @AfterClass
    public static void tearDownAfterClass() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception {
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception {
    }

    /**
     * Test method for {@link com.amazonaws.services.kinesis.leases.impl.LeaseTaker#stringJoin(java.util.Collection, java.lang.String)}.
     */
    @Test
    public final void testStringJoin() {
        List<String> strings = new ArrayList<>();

        strings.add("foo");
        Assert.assertEquals("foo", LeaseTaker.stringJoin(strings, ", "));

        strings.add("bar");
        Assert.assertEquals("foo, bar", LeaseTaker.stringJoin(strings, ", "));
    }

}

@ -0,0 +1,168 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.leases.impl;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.Callable;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
|
||||||
|
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer;
|
||||||
|
|
||||||
|
public class TestHarnessBuilder {
|
||||||
|
|
||||||
|
private long currentTimeNanos;
|
||||||
|
|
||||||
|
private Map<String, KinesisClientLease> leases = new HashMap<String, KinesisClientLease>();
|
||||||
|
private KinesisClientLeaseManager leaseManager;
|
||||||
|
|
||||||
|
private Callable<Long> timeProvider = new Callable<Long>() {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Long call() throws Exception {
|
||||||
|
return currentTimeNanos;
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
public TestHarnessBuilder(KinesisClientLeaseManager leaseManager) {
|
||||||
|
this.leaseManager = leaseManager;
|
||||||
|
}
|
||||||
|
|
||||||
|
public TestHarnessBuilder withLease(String shardId) {
|
||||||
|
return withLease(shardId, "leaseOwner");
|
||||||
|
}
|
||||||
|
|
||||||
|
public TestHarnessBuilder withLease(String shardId, String owner) {
|
||||||
|
KinesisClientLease lease = new KinesisClientLease();
|
||||||
|
lease.setCheckpoint(new ExtendedSequenceNumber("checkpoint"));
|
||||||
|
lease.setOwnerSwitchesSinceCheckpoint(0L);
|
||||||
|
lease.setLeaseCounter(0L);
|
||||||
|
lease.setLeaseOwner(owner);
|
||||||
|
lease.setParentShardIds(Collections.singleton("parentShardId"));
|
||||||
|
lease.setLeaseKey(shardId);
|
||||||
|
|
||||||
|
leases.put(shardId, lease);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Map<String, KinesisClientLease> build() throws LeasingException {
|
||||||
|
for (KinesisClientLease lease : leases.values()) {
|
||||||
|
leaseManager.createLeaseIfNotExists(lease);
|
||||||
|
if (lease.getLeaseOwner() != null) {
|
||||||
|
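// Give owned leases a fresh renewal timestamp so they are not immediately treated as expired.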
lease.setLastCounterIncrementNanos(System.nanoTime());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
currentTimeNanos = System.nanoTime();
|
||||||
|
|
||||||
|
return leases;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void passTime(long millis) {
|
||||||
|
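// Advance the simulated clock; callers pass milliseconds, internal time is tracked in nanoseconds.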
currentTimeNanos += millis * 1000000;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Map<String, KinesisClientLease> takeMutateAssert(LeaseTaker<KinesisClientLease> taker, int numToTake)
|
||||||
|
throws LeasingException {
|
||||||
|
Map<String, KinesisClientLease> result = taker.takeLeases(timeProvider);
|
||||||
|
Assert.assertEquals(numToTake, result.size());
|
||||||
|
|
||||||
|
for (KinesisClientLease actual : result.values()) {
|
||||||
|
KinesisClientLease original = leases.get(actual.getLeaseKey());
|
||||||
|
Assert.assertNotNull(original);
|
||||||
|
|
||||||
|
mutateAssert(taker.getWorkerIdentifier(), original, actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Map<String, KinesisClientLease> takeMutateAssert(LeaseTaker<KinesisClientLease> taker, String... takenShardIds)
|
||||||
|
throws LeasingException {
|
||||||
|
Map<String, KinesisClientLease> result = taker.takeLeases(timeProvider);
|
||||||
|
Assert.assertEquals(takenShardIds.length, result.size());
|
||||||
|
|
||||||
|
for (String shardId : takenShardIds) {
|
||||||
|
KinesisClientLease original = leases.get(shardId);
|
||||||
|
Assert.assertNotNull(original);
|
||||||
|
|
||||||
|
KinesisClientLease actual = result.get(shardId);
|
||||||
|
Assert.assertNotNull(actual);
|
||||||
|
|
||||||
|
mutateAssert(taker.getWorkerIdentifier(), original, actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void mutateAssert(String newWorkerIdentifier, KinesisClientLease original, KinesisClientLease actual) {
|
||||||
|
original.setLeaseCounter(original.getLeaseCounter() + 1);
|
||||||
|
if (original.getLeaseOwner() != null && !newWorkerIdentifier.equals(original.getLeaseOwner())) {
|
||||||
|
original.setOwnerSwitchesSinceCheckpoint(original.getOwnerSwitchesSinceCheckpoint() + 1);
|
||||||
|
}
|
||||||
|
original.setLeaseOwner(newWorkerIdentifier);
|
||||||
|
|
||||||
|
Assert.assertEquals(original, actual); // Assert the contents of the lease
|
||||||
|
}
|
||||||
|
|
||||||
|
public void addLeasesToRenew(ILeaseRenewer<KinesisClientLease> renewer, String... shardIds)
|
||||||
|
throws DependencyException, InvalidStateException {
|
||||||
|
List<KinesisClientLease> leasesToRenew = new ArrayList<KinesisClientLease>();
|
||||||
|
|
||||||
|
for (String shardId : shardIds) {
|
||||||
|
KinesisClientLease lease = leases.get(shardId);
|
||||||
|
Assert.assertNotNull(lease);
|
||||||
|
leasesToRenew.add(lease);
|
||||||
|
}
|
||||||
|
|
||||||
|
renewer.addLeasesToRenew(leasesToRenew);
|
||||||
|
}
|
||||||
|
|
||||||
|
public Map<String, KinesisClientLease> renewMutateAssert(ILeaseRenewer<KinesisClientLease> renewer, String... renewedShardIds)
|
||||||
|
throws DependencyException, InvalidStateException {
|
||||||
|
renewer.renewLeases();
|
||||||
|
|
||||||
|
Map<String, KinesisClientLease> heldLeases = renewer.getCurrentlyHeldLeases();
|
||||||
|
Assert.assertEquals(renewedShardIds.length, heldLeases.size());
|
||||||
|
|
||||||
|
for (String shardId : renewedShardIds) {
|
||||||
|
KinesisClientLease original = leases.get(shardId);
|
||||||
|
Assert.assertNotNull(original);
|
||||||
|
|
||||||
|
KinesisClientLease actual = heldLeases.get(shardId);
|
||||||
|
Assert.assertNotNull(actual);
|
||||||
|
|
||||||
|
original.setLeaseCounter(original.getLeaseCounter() + 1);
|
||||||
|
Assert.assertEquals(original, actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
return heldLeases;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void renewAllLeases() throws LeasingException {
|
||||||
|
for (KinesisClientLease lease : leases.values()) {
|
||||||
|
leaseManager.renewLease(lease);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
@ -0,0 +1,67 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.metrics.impl;

import org.junit.Assert;
import org.junit.Test;

import com.amazonaws.services.cloudwatch.model.MetricDatum;
import com.amazonaws.services.cloudwatch.model.StandardUnit;
import com.amazonaws.services.kinesis.metrics.impl.AccumulateByNameMetricsScope;

public class AccumulatingMetricsScopeTest {

    private static class TestScope extends AccumulateByNameMetricsScope {

        @Override
        public void end() {

        }

        public void assertMetrics(MetricDatum... expectedData) {
            for (MetricDatum expected : expectedData) {
                MetricDatum actual = data.remove(expected.getMetricName());
                Assert.assertEquals(expected, actual);
            }

            Assert.assertEquals("Data should be empty at the end of assertMetrics", 0, data.size());
        }
    }

    @Test
    public void testSingleAdd() {
        TestScope scope = new TestScope();

        scope.addData("name", 2.0, StandardUnit.Count);
        scope.assertMetrics(TestHelper.constructDatum("name", StandardUnit.Count, 2.0, 2.0, 2.0, 1));
    }

    @Test
    public void testAccumulate() {
        TestScope scope = new TestScope();

        scope.addData("name", 2.0, StandardUnit.Count);
        scope.addData("name", 3.0, StandardUnit.Count);
        scope.assertMetrics(TestHelper.constructDatum("name", StandardUnit.Count, 3.0, 2.0, 5.0, 2));
    }

    @Test(expected = IllegalArgumentException.class)
    public void testAccumulateWrongUnit() {
        TestScope scope = new TestScope();

        scope.addData("name", 2.0, StandardUnit.Count);
        scope.addData("name", 3.0, StandardUnit.Megabits);
    }
}

@ -0,0 +1,196 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.metrics.impl;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.amazonaws.services.cloudwatch.model.MetricDatum;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StandardUnit;
|
||||||
|
|
||||||
|
public class CWPublisherRunnableTest {
|
||||||
|
|
||||||
|
private static final int MAX_QUEUE_SIZE = 5;
|
||||||
|
private static final long MAX_BUFFER_TIME_MILLIS = 1;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* For tests to run properly, FLUSH_SIZE should be > 1 and < MAX_QUEUE_SIZE / 2
|
||||||
|
*/
|
||||||
|
private static final int FLUSH_SIZE = 2;
|
||||||
|
|
||||||
|
private static class TestHarness {
|
||||||
|
private List<MetricDatumWithKey<CWMetricKey>> data = new ArrayList<MetricDatumWithKey<CWMetricKey>>();
|
||||||
|
private int counter = 0;
|
||||||
|
private ICWMetricsPublisher<CWMetricKey> publisher;
|
||||||
|
private CWPublisherRunnable<CWMetricKey> runnable;
|
||||||
|
private long time = 0L;
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
public TestHarness() {
|
||||||
|
publisher = Mockito.mock(ICWMetricsPublisher.class);
|
||||||
|
runnable = new CWPublisherRunnable<CWMetricKey>(publisher,
|
||||||
|
MAX_BUFFER_TIME_MILLIS,
|
||||||
|
MAX_QUEUE_SIZE,
|
||||||
|
FLUSH_SIZE) {
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected long getTime() {
|
||||||
|
return time;
|
||||||
|
}
|
||||||
|
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
public void enqueueRandom(int count) {
|
||||||
|
for (int i = 0; i < count; i++) {
|
||||||
|
int value = counter++;
|
||||||
|
data.add(constructDatum(value));
|
||||||
|
}
|
||||||
|
|
||||||
|
runnable.enqueue(data.subList(data.size() - count, data.size()));
|
||||||
|
}
|
||||||
|
|
||||||
|
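// Builds a datum with a unique metric name whose min, max, and sum all equal the given value.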
private MetricDatumWithKey<CWMetricKey> constructDatum(int value) {
|
||||||
|
MetricDatum datum = TestHelper.constructDatum("datum-" + Integer.toString(value),
|
||||||
|
StandardUnit.Count,
|
||||||
|
value,
|
||||||
|
value,
|
||||||
|
value,
|
||||||
|
1);
|
||||||
|
|
||||||
|
return new MetricDatumWithKey<CWMetricKey>(new CWMetricKey(datum), datum);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run one iteration of the runnable and assert that it called CloudWatch with count records beginning with
|
||||||
|
* record startIndex, and no more than that.
|
||||||
|
*
|
||||||
|
* @param startIndex
|
||||||
|
* @param count
|
||||||
|
*/
|
||||||
|
public void runAndAssert(int startIndex, int count) {
|
||||||
|
runnable.runOnce();
|
||||||
|
|
||||||
|
if (count > 0) {
|
||||||
|
Mockito.verify(publisher).publishMetrics(data.subList(startIndex, startIndex + count));
|
||||||
|
}
|
||||||
|
|
||||||
|
Mockito.verifyNoMoreInteractions(publisher);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run one iteration of the runnable and assert that it called CloudWatch with all data.
|
||||||
|
*/
|
||||||
|
public void runAndAssertAllData() {
|
||||||
|
runAndAssert(0, data.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
public void passTime(long time) {
|
||||||
|
this.time += time;
|
||||||
|
}
|
||||||
|
|
||||||
|
public CWPublisherRunnable<CWMetricKey> getRunnable() {
|
||||||
|
return runnable;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private TestHarness harness;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() {
|
||||||
|
harness = new TestHarness();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enqueue a full batch of data. Without allowing time to pass, assert that the runnable sends all data.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testPublishOnFlushSize() {
|
||||||
|
harness.enqueueRandom(FLUSH_SIZE);
|
||||||
|
harness.runAndAssertAllData();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enqueue 1 message. Without allowing time to pass, assert that the runnable sends nothing.
|
||||||
|
* Pass MAX_BUFFER_TIME_MILLIS of time, then assert that the runnable sends all data. Enqueue another message.
|
||||||
|
* Repeat timing/assertion pattern.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testWaitForBatchTimeout() {
|
||||||
|
harness.enqueueRandom(1);
|
||||||
|
harness.runAndAssert(0, 0);
|
||||||
|
harness.passTime(MAX_BUFFER_TIME_MILLIS);
|
||||||
|
harness.runAndAssertAllData();
|
||||||
|
|
||||||
|
harness.enqueueRandom(1);
|
||||||
|
harness.runAndAssert(0, 0);
|
||||||
|
harness.passTime(MAX_BUFFER_TIME_MILLIS);
|
||||||
|
harness.runAndAssert(1, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enqueue two batches + 1 datum. Without allowing time to pass, assert that the runnable sends all but the last
|
||||||
|
* datum. Pass MAX_BUFFER_TIME_MILLIS of time, then assert that the runnable sends the last datum.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testDrainQueue() {
|
||||||
|
int numBatches = 2;
|
||||||
|
harness.enqueueRandom(FLUSH_SIZE * numBatches);
|
||||||
|
harness.enqueueRandom(1);
|
||||||
|
for (int i = 0; i < numBatches; i++) {
|
||||||
|
harness.runAndAssert(i * FLUSH_SIZE, FLUSH_SIZE);
|
||||||
|
}
|
||||||
|
harness.runAndAssert(0, 0);
|
||||||
|
harness.passTime(MAX_BUFFER_TIME_MILLIS);
|
||||||
|
harness.runAndAssert(numBatches * FLUSH_SIZE, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enqueue FLUSH_SIZE + 1 messages. Shut down the runnable. Without passing time, assert that the runnable sends all
|
||||||
|
* data and isShutdown() returns false until all data is sent.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testShutdown() {
|
||||||
|
harness.enqueueRandom(FLUSH_SIZE + 1);
|
||||||
|
harness.getRunnable().shutdown();
|
||||||
|
|
||||||
|
harness.runAndAssert(0, FLUSH_SIZE);
|
||||||
|
Assert.assertFalse(harness.getRunnable().isShutdown());
|
||||||
|
|
||||||
|
harness.runAndAssert(FLUSH_SIZE, 1);
|
||||||
|
Assert.assertTrue(harness.getRunnable().isShutdown());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Enqueue MAX_QUEUE_SIZE + 1 messages. Shutdown the runnable. Assert that the runnable sends all but the last
|
||||||
|
* datum and is shut down afterwards.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testQueueFullDropData() {
|
||||||
|
int numRecords = MAX_QUEUE_SIZE + 1;
|
||||||
|
harness.enqueueRandom(numRecords);
|
||||||
|
harness.getRunnable().shutdown();
|
||||||
|
for (int i = 0; i < MAX_QUEUE_SIZE; i += FLUSH_SIZE) {
|
||||||
|
harness.runAndAssert(i, Math.min(MAX_QUEUE_SIZE - i, FLUSH_SIZE));
|
||||||
|
}
|
||||||
|
|
||||||
|
Assert.assertTrue(harness.getRunnable().isShutdown());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,107 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.metrics.impl;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.ArgumentCaptor;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.MetricDatum;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StandardUnit;
|
||||||
|
|
||||||
|
public class DefaultCWMetricsPublisherTest {
|
||||||
|
|
||||||
|
private final String NAMESPACE = "fakeNamespace";
|
||||||
|
private final AmazonCloudWatch cloudWatchClient = Mockito.mock(AmazonCloudWatch.class);
|
||||||
|
private DefaultCWMetricsPublisher publisher = new DefaultCWMetricsPublisher(cloudWatchClient, NAMESPACE);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Test whether the data passed to the metrics publisher is equal to the data that will be published to CloudWatch
|
||||||
|
*/
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMetricsPublisher() {
|
||||||
|
List<MetricDatumWithKey<CWMetricKey>> dataToPublish = constructMetricDatumWithKeyList(25);
|
||||||
|
List<Map<String, MetricDatum>> expectedData = constructMetricDatumListMap(dataToPublish);
|
||||||
|
publisher.publishMetrics(dataToPublish);
|
||||||
|
|
||||||
|
ArgumentCaptor<PutMetricDataRequest> argument = ArgumentCaptor.forClass(PutMetricDataRequest.class);
|
||||||
|
Mockito.verify(cloudWatchClient, Mockito.atLeastOnce()).putMetricData(argument.capture());
|
||||||
|
|
||||||
|
List<PutMetricDataRequest> requests = argument.getAllValues();
|
||||||
|
Assert.assertEquals(expectedData.size(), requests.size());
|
||||||
|
|
||||||
|
for (int i = 0; i < requests.size(); i++) {
|
||||||
|
assertMetricData(expectedData.get(i), requests.get(i));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
public static List<MetricDatumWithKey<CWMetricKey>> constructMetricDatumWithKeyList(int value) {
|
||||||
|
List<MetricDatumWithKey<CWMetricKey>> data = new ArrayList<MetricDatumWithKey<CWMetricKey>>();
|
||||||
|
for (int i = 1; i <= value; i++) {
|
||||||
|
MetricDatum datum =
|
||||||
|
TestHelper.constructDatum("datum" + Integer.toString(i), StandardUnit.Count, i, i, i, 1);
|
||||||
|
data.add(new MetricDatumWithKey<CWMetricKey>(new CWMetricKey(datum), datum));
|
||||||
|
}
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
// batchSize is the number of metrics sent in a single request.
|
||||||
|
// In DefaultCWMetricsPublisher this number is set to 20.
|
||||||
|
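// Builds the per-request maps (metric name -> datum) that publishMetrics is expected to produce for the given data.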
public List<Map<String, MetricDatum>> constructMetricDatumListMap(List<MetricDatumWithKey<CWMetricKey>> data) {
|
||||||
|
int batchSize = 20;
|
||||||
|
List<Map<String, MetricDatum>> dataList = new ArrayList<Map<String, MetricDatum>>();
|
||||||
|
|
||||||
|
int expectedRequestCount = (int) Math.ceil((double) data.size() / batchSize);
|
||||||
|
|
||||||
|
for (int i = 0; i < expectedRequestCount; i++) {
|
||||||
|
dataList.add(i, new HashMap<String, MetricDatum>());
|
||||||
|
}
|
||||||
|
|
||||||
|
int batchIndex = 1;
|
||||||
|
int listIndex = 0;
|
||||||
|
for (MetricDatumWithKey<CWMetricKey> metricDatumWithKey : data) {
|
||||||
|
if (batchIndex > batchSize) {
|
||||||
|
batchIndex = 1;
|
||||||
|
listIndex++;
|
||||||
|
}
|
||||||
|
batchIndex++;
|
||||||
|
dataList.get(listIndex).put(metricDatumWithKey.datum.getMetricName(), metricDatumWithKey.datum);
|
||||||
|
}
|
||||||
|
return dataList;
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void assertMetricData(Map<String, MetricDatum> expected, PutMetricDataRequest actual) {
|
||||||
|
List<MetricDatum> actualData = actual.getMetricData();
|
||||||
|
for (MetricDatum actualDatum : actualData) {
|
||||||
|
String metricName = actualDatum.getMetricName();
|
||||||
|
Assert.assertNotNull(expected.get(metricName));
|
||||||
|
Assert.assertTrue(expected.get(metricName).equals(actualDatum));
|
||||||
|
expected.remove(metricName);
|
||||||
|
}
|
||||||
|
|
||||||
|
Assert.assertTrue(expected.isEmpty());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,60 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.metrics.impl;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StandardUnit;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.impl.EndingMetricsScope;
|
||||||
|
|
||||||
|
public class EndingMetricsScopeTest {
|
||||||
|
|
||||||
|
private static class TestScope extends EndingMetricsScope {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAddDataNotEnded() {
|
||||||
|
TestScope scope = new TestScope();
|
||||||
|
scope.addData("foo", 1.0, StandardUnit.Count);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAddDimensionNotEnded() {
|
||||||
|
TestScope scope = new TestScope();
|
||||||
|
scope.addDimension("foo", "bar");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = IllegalArgumentException.class)
|
||||||
|
public void testAddDataEnded() {
|
||||||
|
TestScope scope = new TestScope();
|
||||||
|
scope.end();
|
||||||
|
scope.addData("foo", 1.0, StandardUnit.Count);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = IllegalArgumentException.class)
|
||||||
|
public void testAddDimensionEnded() {
|
||||||
|
TestScope scope = new TestScope();
|
||||||
|
scope.end();
|
||||||
|
scope.addDimension("foo", "bar");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(expected = IllegalArgumentException.class)
|
||||||
|
public void testDoubleEnd() {
|
||||||
|
TestScope scope = new TestScope();
|
||||||
|
scope.end();
|
||||||
|
scope.end();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,125 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.metrics.impl;
|
||||||
|
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.cloudwatch.model.Dimension;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.MetricDatum;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StandardUnit;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope;
|
||||||
|
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
|
||||||
|
import com.google.common.collect.ImmutableSet;
|
||||||
|
|
||||||
|
public class FilteringMetricsScopeTest {
|
||||||
|
|
||||||
|
private static class TestScope extends FilteringMetricsScope {
|
||||||
|
|
||||||
|
private TestScope() {
|
||||||
|
}
|
||||||
|
|
||||||
|
private TestScope(MetricsLevel metricsLevel, Set<String> metricsEnabledDimensions) {
|
||||||
|
super(metricsLevel, metricsEnabledDimensions);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void assertMetrics(MetricDatum... expectedData) {
|
||||||
|
for (MetricDatum expected : expectedData) {
|
||||||
|
MetricDatum actual = data.remove(expected.getMetricName());
|
||||||
|
Assert.assertEquals(expected, actual);
|
||||||
|
}
|
||||||
|
|
||||||
|
Assert.assertEquals("Data should be empty at the end of assertMetrics", 0, data.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
public void assertDimensions(Dimension... dimensions) {
|
||||||
|
for (Dimension dimension : dimensions) {
|
||||||
|
Assert.assertTrue(getDimensions().remove(dimension));
|
||||||
|
}
|
||||||
|
|
||||||
|
Assert.assertTrue("Dimensions should be empty at the end of assertDimensions", getDimensions().isEmpty());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDefaultAddAll() {
|
||||||
|
TestScope scope = new TestScope();
|
||||||
|
scope.addData("detailedDataName", 2.0, StandardUnit.Count, MetricsLevel.DETAILED);
|
||||||
|
scope.addData("noLevelDataName", 3.0, StandardUnit.Milliseconds);
|
||||||
|
scope.addDimension("dimensionName", "dimensionValue");
|
||||||
|
|
||||||
|
// By default all metrics and dimensions should be allowed.
|
||||||
|
scope.assertMetrics(
|
||||||
|
TestHelper.constructDatum("detailedDataName", StandardUnit.Count, 2.0, 2.0, 2.0, 1),
|
||||||
|
TestHelper.constructDatum("noLevelDataName", StandardUnit.Milliseconds, 3.0, 3.0, 3.0, 1.0));
|
||||||
|
scope.assertDimensions(TestHelper.constructDimension("dimensionName", "dimensionValue"));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMetricsLevel() {
|
||||||
|
TestScope scope = new TestScope(MetricsLevel.SUMMARY, null);
|
||||||
|
scope.addData("summaryDataName", 2.0, StandardUnit.Count, MetricsLevel.SUMMARY);
|
||||||
|
scope.addData("summaryDataName", 10.0, StandardUnit.Count, MetricsLevel.SUMMARY);
|
||||||
|
scope.addData("detailedDataName", 4.0, StandardUnit.Bytes, MetricsLevel.DETAILED);
|
||||||
|
scope.addData("noLevelDataName", 3.0, StandardUnit.Milliseconds);
|
||||||
|
|
||||||
|
scope.assertMetrics(TestHelper.constructDatum("summaryDataName", StandardUnit.Count, 10.0, 2.0, 12.0, 2.0));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMetricsLevelNone() {
|
||||||
|
TestScope scope = new TestScope(MetricsLevel.NONE, null);
|
||||||
|
scope.addData("summaryDataName", 2.0, StandardUnit.Count, MetricsLevel.SUMMARY);
|
||||||
|
scope.addData("summaryDataName", 10.0, StandardUnit.Count, MetricsLevel.SUMMARY);
|
||||||
|
scope.addData("detailedDataName", 4.0, StandardUnit.Bytes, MetricsLevel.DETAILED);
|
||||||
|
scope.addData("noLevelDataName", 3.0, StandardUnit.Milliseconds);
|
||||||
|
|
||||||
|
// No metrics should be emitted.
|
||||||
|
scope.assertMetrics();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMetricsDimensions() {
|
||||||
|
TestScope scope = new TestScope(MetricsLevel.DETAILED, ImmutableSet.of("ShardId"));
|
||||||
|
scope.addDimension("ShardId", "shard-0001");
|
||||||
|
scope.addDimension("Operation", "ProcessRecords");
|
||||||
|
scope.addDimension("ShardId", "shard-0001");
|
||||||
|
scope.addDimension("ShardId", "shard-0002");
|
||||||
|
scope.addDimension("WorkerIdentifier", "testworker");
|
||||||
|
|
||||||
|
scope.assertDimensions(
|
||||||
|
TestHelper.constructDimension("ShardId", "shard-0001"),
|
||||||
|
TestHelper.constructDimension("ShardId", "shard-0002"));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testMetricsDimensionsAll() {
|
||||||
|
TestScope scope = new TestScope(MetricsLevel.DETAILED, ImmutableSet.of(
|
||||||
|
"ThisDoesNotMatter", IMetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter"));
|
||||||
|
scope.addDimension("ShardId", "shard-0001");
|
||||||
|
scope.addDimension("Operation", "ProcessRecords");
|
||||||
|
scope.addDimension("ShardId", "shard-0001");
|
||||||
|
scope.addDimension("ShardId", "shard-0002");
|
||||||
|
scope.addDimension("WorkerIdentifier", "testworker");
|
||||||
|
|
||||||
|
scope.assertDimensions(
|
||||||
|
TestHelper.constructDimension("ShardId", "shard-0001"),
|
||||||
|
TestHelper.constructDimension("ShardId", "shard-0002"),
|
||||||
|
TestHelper.constructDimension("Operation", "ProcessRecords"),
|
||||||
|
TestHelper.constructDimension("WorkerIdentifier", "testworker"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,96 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.metrics.impl;
|
||||||
|
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import com.amazonaws.services.cloudwatch.model.Dimension;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.MetricDatum;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StandardUnit;
|
||||||
|
|
||||||
|
public class MetricAccumulatingQueueTest {
|
||||||
|
|
||||||
|
private static final int MAX_QUEUE_SIZE = 5;
|
||||||
|
private MetricAccumulatingQueue<CWMetricKey> queue;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() {
|
||||||
|
this.queue = new MetricAccumulatingQueue<CWMetricKey>(MAX_QUEUE_SIZE);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Test that MetricDatums offered to the queue are accumulated by metric key and that the
* drained datums carry the correctly accumulated statistics.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testAccumulation() {
|
||||||
|
Collection<Dimension> dimensionsA = Collections.singleton(new Dimension().withName("name").withValue("a"));
|
||||||
|
Collection<Dimension> dimensionsB = Collections.singleton(new Dimension().withName("name").withValue("b"));
|
||||||
|
String keyA = "a";
|
||||||
|
String keyB = "b";
|
||||||
|
|
||||||
|
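// datum1 and datum2 share the same metric name and dimensions, so the queue should
// accumulate them into a single entry (max 10, min 1, sum 17, sample count 4).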
MetricDatum datum1 =
|
||||||
|
TestHelper.constructDatum(keyA, StandardUnit.Count, 10, 5, 15, 2).withDimensions(dimensionsA);
|
||||||
|
queue.offer(new CWMetricKey(datum1), datum1);
|
||||||
|
MetricDatum datum2 =
|
||||||
|
TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 2, 2).withDimensions(dimensionsA);
|
||||||
|
queue.offer(new CWMetricKey(datum2), datum2);
|
||||||
|
|
||||||
|
MetricDatum datum3 =
|
||||||
|
TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 2, 2).withDimensions(dimensionsB);
|
||||||
|
queue.offer(new CWMetricKey(datum3), datum3);
|
||||||
|
|
||||||
|
MetricDatum datum4 = TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 2, 2);
|
||||||
|
queue.offer(new CWMetricKey(datum4), datum4);
|
||||||
|
queue.offer(new CWMetricKey(datum4), datum4);
|
||||||
|
|
||||||
|
MetricDatum datum5 =
|
||||||
|
TestHelper.constructDatum(keyB, StandardUnit.Count, 100, 10, 110, 2).withDimensions(dimensionsA);
|
||||||
|
queue.offer(new CWMetricKey(datum5), datum5);
|
||||||
|
|
||||||
|
Assert.assertEquals(4, queue.size());
|
||||||
|
List<MetricDatumWithKey<CWMetricKey>> items = queue.drain(4);
|
||||||
|
|
||||||
|
Assert.assertEquals(items.get(0).datum, TestHelper.constructDatum(keyA, StandardUnit.Count, 10, 1, 17, 4)
|
||||||
|
.withDimensions(dimensionsA));
|
||||||
|
Assert.assertEquals(items.get(1).datum, datum3);
|
||||||
|
Assert.assertEquals(items.get(2).datum, TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 4, 4));
|
||||||
|
Assert.assertEquals(items.get(3).datum, TestHelper.constructDatum(keyB, StandardUnit.Count, 100, 10, 110, 2)
|
||||||
|
.withDimensions(dimensionsA));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Test that the number of MetricDatum entries that can be added to the queue is capped at MAX_QUEUE_SIZE;
* any datums offered beyond the queue's capacity are dropped.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testDrop() {
|
||||||
|
for (int i = 0; i < MAX_QUEUE_SIZE; i++) {
|
||||||
|
MetricDatum datum = TestHelper.constructDatum(Integer.toString(i), StandardUnit.Count, 1, 1, 2, 2);
|
||||||
|
CWMetricKey key = new CWMetricKey(datum);
|
||||||
|
Assert.assertTrue(queue.offer(key, datum));
|
||||||
|
}
|
||||||
|
|
||||||
|
MetricDatum datum = TestHelper.constructDatum("foo", StandardUnit.Count, 1, 1, 2, 2);
|
||||||
|
Assert.assertFalse(queue.offer(new CWMetricKey(datum), datum));
|
||||||
|
Assert.assertEquals(MAX_QUEUE_SIZE, queue.size());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,40 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.metrics.impl;
|
||||||
|
|
||||||
|
import com.amazonaws.services.cloudwatch.model.Dimension;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.MetricDatum;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StandardUnit;
|
||||||
|
import com.amazonaws.services.cloudwatch.model.StatisticSet;
|
||||||
|
|
||||||
|
public class TestHelper {
|
||||||
|
public static MetricDatum constructDatum(String name,
|
||||||
|
StandardUnit unit,
|
||||||
|
double maximum,
|
||||||
|
double minimum,
|
||||||
|
double sum,
|
||||||
|
double count) {
|
||||||
|
return new MetricDatum().withMetricName(name)
|
||||||
|
.withUnit(unit)
|
||||||
|
.withStatisticValues(new StatisticSet().withMaximum(maximum)
|
||||||
|
.withMinimum(minimum)
|
||||||
|
.withSum(sum)
|
||||||
|
.withSampleCount(count));
|
||||||
|
}
|
||||||
|
|
||||||
|
public static Dimension constructDimension(String name, String value) {
|
||||||
|
return new Dimension().withName(name).withValue(value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,197 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.multilang;
|
||||||
|
|
||||||
|
import java.io.BufferedReader;
|
||||||
|
import java.io.ByteArrayInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.InputStream;
|
||||||
|
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
import org.mockito.invocation.InvocationOnMock;
|
||||||
|
import org.mockito.stubbing.Answer;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.Message;
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.StatusMessage;
|
||||||
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
|
||||||
|
public class MessageReaderTest {
|
||||||
|
|
||||||
|
private static final String shardId = "shard-123";
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This line is based on the definition of the protocol for communication between the KCL record processor and
|
||||||
|
* the client's process.
|
||||||
|
*/
|
||||||
|
private String buildCheckpointLine(String sequenceNumber) {
|
||||||
|
return String.format("{\"action\":\"checkpoint\", \"checkpoint\":\"%s\"}", sequenceNumber);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This line is based on the definition of the protocol for communication between the KCL record processor and
|
||||||
|
* the client's process.
|
||||||
|
*/
|
||||||
|
private String buildStatusLine(String methodName) {
|
||||||
|
return String.format("{\"action\":\"status\", \"responseFor\":\"%s\"}", methodName);
|
||||||
|
}
|
||||||
|
|
||||||
|
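// Builds an input stream that interleaves status and checkpoint JSON lines, plus a blank
// line and a bogus line that the reader is expected to skip over.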
private InputStream buildInputStreamOfGoodInput(String[] sequenceNumbers, String[] responseFors) {
|
||||||
|
// Just interlace the lines
|
||||||
|
StringBuilder stringBuilder = new StringBuilder();
|
||||||
|
// This is just a reminder to anyone who changes the arrays
|
||||||
|
Assert.assertTrue(responseFors.length == sequenceNumbers.length + 1);
|
||||||
|
stringBuilder.append(buildStatusLine(responseFors[0]));
|
||||||
|
stringBuilder.append("\n");
|
||||||
|
// Also a whitespace-only line, which it should be able to handle without failing.
|
||||||
|
stringBuilder.append(" \n");
|
||||||
|
// Also a bogus data line, which it should be able to handle without failing.
|
||||||
|
stringBuilder.append(" bogus data \n");
|
||||||
|
for (int i = 0; i < Math.min(sequenceNumbers.length, responseFors.length); i++) {
|
||||||
|
stringBuilder.append(buildCheckpointLine(sequenceNumbers[i]));
|
||||||
|
stringBuilder.append("\n");
|
||||||
|
stringBuilder.append(buildStatusLine(responseFors[i + 1]));
|
||||||
|
stringBuilder.append("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
return new ByteArrayInputStream(stringBuilder.toString().getBytes());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void runLoopGoodInputTest() {
|
||||||
|
String[] sequenceNumbers = new String[] { "123", "456", "789" };
|
||||||
|
String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" };
|
||||||
|
InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors);
|
||||||
|
MessageReader reader =
|
||||||
|
new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
|
||||||
|
|
||||||
|
for (String responseFor : responseFors) {
|
||||||
|
StatusMessage statusMessage = null;
|
||||||
|
try {
|
||||||
|
Message message = reader.getNextMessageFromSTDOUT().get();
|
||||||
|
if (message instanceof StatusMessage) {
|
||||||
|
Assert.assertEquals("The status message's responseFor field should have been correct", responseFor,
|
||||||
|
((StatusMessage) message).getResponseFor());
|
||||||
|
}
|
||||||
|
} catch (InterruptedException | ExecutionException e) {
|
||||||
|
Assert.fail("There should have been a status message for " + responseFor);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void drainInputTest() throws InterruptedException, ExecutionException {
|
||||||
|
String[] sequenceNumbers = new String[] { "123", "456", "789" };
|
||||||
|
String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" };
|
||||||
|
InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors);
|
||||||
|
|
||||||
|
MessageReader reader =
|
||||||
|
new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
|
||||||
|
Future<Boolean> drainFuture = reader.drainSTDOUT();
|
||||||
|
Boolean drainResult = drainFuture.get();
|
||||||
|
Assert.assertNotNull(drainResult);
|
||||||
|
Assert.assertTrue(drainResult);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* readValue should fail safely and just continue looping
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void unexpectedStatusFailure() {
|
||||||
|
BufferedReader bufferReader = Mockito.mock(BufferedReader.class);
|
||||||
|
try {
|
||||||
|
Mockito.doAnswer(new Answer() {
|
||||||
|
private boolean returnedOnce = false;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Object answer(InvocationOnMock invocation) throws Throwable {
|
||||||
|
if (returnedOnce) {
|
||||||
|
return "{\"action\":\"status\",\"responseFor\":\"processRecords\"}";
|
||||||
|
} else {
|
||||||
|
returnedOnce = true;
|
||||||
|
return "{\"action\":\"shutdown\",\"reason\":\"ZOMBIE\"}";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}).when(bufferReader).readLine();
|
||||||
|
} catch (IOException e) {
|
||||||
|
Assert.fail("There shouldn't be an exception while setting up this mock.");
|
||||||
|
}
|
||||||
|
|
||||||
|
MessageReader reader =
|
||||||
|
new MessageReader().initialize(bufferReader, shardId, new ObjectMapper(),
|
||||||
|
Executors.newCachedThreadPool());
|
||||||
|
|
||||||
|
try {
|
||||||
|
reader.getNextMessageFromSTDOUT().get();
|
||||||
|
} catch (Exception e) {
|
||||||
|
e.printStackTrace();
|
||||||
|
Assert.fail("MessageReader should have handled the bad message gracefully");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void messageReaderBuilderTest() {
|
||||||
|
InputStream stream = new ByteArrayInputStream("".getBytes());
|
||||||
|
MessageReader reader =
|
||||||
|
new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
|
||||||
|
Assert.assertNotNull(reader);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void readLineFails() throws IOException {
|
||||||
|
BufferedReader input = Mockito.mock(BufferedReader.class);
|
||||||
|
Mockito.doThrow(IOException.class).when(input).readLine();
|
||||||
|
MessageReader reader =
|
||||||
|
new MessageReader().initialize(input, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
|
||||||
|
|
||||||
|
Future<Message> readTask = reader.getNextMessageFromSTDOUT();
|
||||||
|
|
||||||
|
try {
|
||||||
|
readTask.get();
|
||||||
|
Assert.fail("The reading task should have failed due to an IOException.");
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
Assert.fail("The reading task should not have been interrupted. It should have failed due to an IOException.");
|
||||||
|
} catch (ExecutionException e) {
|
||||||
|
// Yay!!
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void noMoreMessagesTest() throws InterruptedException {
|
||||||
|
InputStream stream = new ByteArrayInputStream("".getBytes());
|
||||||
|
MessageReader reader =
|
||||||
|
new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
|
||||||
|
Future<Message> future = reader.getNextMessageFromSTDOUT();
|
||||||
|
|
||||||
|
try {
|
||||||
|
future.get();
|
||||||
|
Assert.fail("There should have been an execution exception if there were no more messages to get.");
|
||||||
|
} catch (ExecutionException e) {
|
||||||
|
// Good path.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,153 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.multilang;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
import java.nio.ByteBuffer;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
|
||||||
|
import com.amazonaws.services.kinesis.model.Record;
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.Message;
|
||||||
|
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||||
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
|
||||||
|
public class MessageWriterTest {
|
||||||
|
|
||||||
|
private static final String shardId = "shard-123";
|
||||||
|
MessageWriter messageWriter;
|
||||||
|
OutputStream stream;
|
||||||
|
|
||||||
|
// ExecutorService executor;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() {
|
||||||
|
stream = Mockito.mock(OutputStream.class);
|
||||||
|
messageWriter =
|
||||||
|
new MessageWriter().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Here we are just testing that calling write causes bytes to get written to the stream.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void writeCheckpointMessageNoErrorTest() throws IOException, InterruptedException, ExecutionException {
|
||||||
|
Future<Boolean> future = this.messageWriter.writeCheckpointMessageWithError("1234", null);
|
||||||
|
future.get();
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
|
||||||
|
Mockito.anyInt());
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void writeCheckpointMessageWithErrorTest() throws IOException, InterruptedException, ExecutionException {
|
||||||
|
Future<Boolean> future = this.messageWriter.writeCheckpointMessageWithError("1234", new Throwable());
|
||||||
|
future.get();
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
|
||||||
|
Mockito.anyInt());
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void writeInitializeMessageTest() throws IOException, InterruptedException, ExecutionException {
|
||||||
|
Future<Boolean> future = this.messageWriter.writeInitializeMessage(shardId);
|
||||||
|
future.get();
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
|
||||||
|
Mockito.anyInt());
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void writeProcessRecordsMessageTest() throws IOException, InterruptedException, ExecutionException {
|
||||||
|
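// Build a small batch: one fully populated record and one empty record.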
List<Record> records = new ArrayList<Record>() {
|
||||||
|
{
|
||||||
|
this.add(new Record() {
|
||||||
|
{
|
||||||
|
this.setData(ByteBuffer.wrap("kitten".getBytes()));
|
||||||
|
this.setPartitionKey("some cats");
|
||||||
|
this.setSequenceNumber("357234807854789057805");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
this.add(new Record());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Future<Boolean> future = this.messageWriter.writeProcessRecordsMessage(records);
|
||||||
|
future.get();
|
||||||
|
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
|
||||||
|
Mockito.anyInt());
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void writeShutdownMessageTest() throws IOException, InterruptedException, ExecutionException {
|
||||||
|
Future<Boolean> future = this.messageWriter.writeShutdownMessage(ShutdownReason.TERMINATE);
|
||||||
|
future.get();
|
||||||
|
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
|
||||||
|
Mockito.anyInt());
|
||||||
|
Mockito.verify(this.stream, Mockito.atLeastOnce()).flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void streamIOExceptionTest() throws IOException, InterruptedException, ExecutionException {
|
||||||
|
Mockito.doThrow(IOException.class).when(stream).flush();
|
||||||
|
Future<Boolean> initializeTask = this.messageWriter.writeInitializeMessage(shardId);
|
||||||
|
Boolean result = initializeTask.get();
|
||||||
|
Assert.assertNotNull(result);
|
||||||
|
Assert.assertFalse(result);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void objectMapperFails() throws JsonProcessingException, InterruptedException, ExecutionException {
|
||||||
|
ObjectMapper mapper = Mockito.mock(ObjectMapper.class);
|
||||||
|
Mockito.doThrow(JsonProcessingException.class).when(mapper).writeValueAsString(Mockito.any(Message.class));
|
||||||
|
messageWriter = new MessageWriter().initialize(stream, shardId, mapper, Executors.newCachedThreadPool());
|
||||||
|
|
||||||
|
try {
|
||||||
|
messageWriter.writeShutdownMessage(ShutdownReason.ZOMBIE);
|
||||||
|
Assert.fail("The mapper failed so no write method should be able to succeed.");
|
||||||
|
} catch (Exception e) {
|
||||||
|
// Note: this differs from a stream failure. Stream failures are expected and handled
// gracefully, whereas the JSON mapping should always succeed, so a mapping failure propagates.
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void closeWriterTest() throws IOException {
|
||||||
|
Assert.assertTrue(this.messageWriter.isOpen());
|
||||||
|
this.messageWriter.close();
|
||||||
|
Mockito.verify(this.stream, Mockito.times(1)).close();
|
||||||
|
Assert.assertFalse(this.messageWriter.isOpen());
|
||||||
|
try {
|
||||||
|
// Any message should fail
|
||||||
|
this.messageWriter.writeInitializeMessage(shardId);
|
||||||
|
Assert.fail("MessageWriter should be closed and unable to write.");
|
||||||
|
} catch (IllegalStateException e) {
|
||||||
|
// This should happen.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,91 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.multilang;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertNotNull;
|
||||||
|
|
||||||
|
import java.io.ByteArrayInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Properties;
|
||||||
|
|
||||||
|
import junit.framework.Assert;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.amazonaws.auth.AWSCredentials;
|
||||||
|
import com.amazonaws.auth.AWSCredentialsProvider;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfigurator;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
|
||||||
|
|
||||||
|
public class MultiLangDaemonConfigTest {
|
||||||
|
|
||||||
|
private static String FILENAME = "some.properties";
|
||||||
|
|
||||||
|
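// Returns a mocked configurator that ignores the supplied properties and always returns a
// fixed KinesisClientLibConfiguration backed by mocked credentials.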
private KinesisClientLibConfigurator buildMockConfigurator() {
|
||||||
|
AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class);
|
||||||
|
AWSCredentials creds = Mockito.mock(AWSCredentials.class);
|
||||||
|
Mockito.doReturn(creds).when(credentialsProvider).getCredentials();
|
||||||
|
Mockito.doReturn("cool-user").when(creds).getAWSAccessKeyId();
|
||||||
|
KinesisClientLibConfiguration kclConfig =
|
||||||
|
new KinesisClientLibConfiguration("cool-app", "cool-stream", credentialsProvider, "cool-worker");
|
||||||
|
KinesisClientLibConfigurator configurator = Mockito.mock(KinesisClientLibConfigurator.class);
|
||||||
|
Mockito.doReturn(kclConfig).when(configurator).getConfiguration(Mockito.any(Properties.class));
|
||||||
|
return configurator;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void constructorTest() throws IOException {
|
||||||
|
String PROPERTIES =
|
||||||
|
"executableName = randomEXE \n" + "applicationName = testApp \n" + "streamName = fakeStream \n"
|
||||||
|
+ "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n"
|
||||||
|
+ "processingLanguage = malbolge";
|
||||||
|
ClassLoader classLoader = Mockito.mock(ClassLoader.class);
|
||||||
|
|
||||||
|
Mockito.doReturn(new ByteArrayInputStream(PROPERTIES.getBytes()))
|
||||||
|
.when(classLoader)
|
||||||
|
.getResourceAsStream(FILENAME);
|
||||||
|
|
||||||
|
MultiLangDaemonConfig daemonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, buildMockConfigurator());

assertNotNull(daemonConfig.getExecutorService());
assertNotNull(daemonConfig.getKinesisClientLibConfiguration());
assertNotNull(daemonConfig.getRecordProcessorFactory());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void propertyValidation() {
|
||||||
|
String PROPERTIES_NO_EXECUTABLE_NAME =
|
||||||
|
"applicationName = testApp \n" + "streamName = fakeStream \n"
|
||||||
|
+ "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n"
|
||||||
|
+ "processingLanguage = malbolge";
|
||||||
|
ClassLoader classLoader = Mockito.mock(ClassLoader.class);
|
||||||
|
|
||||||
|
Mockito.doReturn(new ByteArrayInputStream(PROPERTIES_NO_EXECUTABLE_NAME.getBytes()))
|
||||||
|
.when(classLoader)
|
||||||
|
.getResourceAsStream(FILENAME);
|
||||||
|
|
||||||
|
MultiLangDaemonConfig config;
|
||||||
|
try {
|
||||||
|
config = new MultiLangDaemonConfig(FILENAME, classLoader, buildMockConfigurator());
|
||||||
|
Assert.fail("Construction of the config should have failed due to property validation failing.");
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
// Good
|
||||||
|
} catch (IOException e) {
|
||||||
|
Assert.fail();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,54 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.multilang;
|
||||||
|
|
||||||
|
import java.io.PrintStream;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
import com.amazonaws.auth.AWSCredentials;
|
||||||
|
import com.amazonaws.auth.AWSCredentialsProvider;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
|
||||||
|
|
||||||
|
public class MultiLangDaemonTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void buildWorkerTest() {
|
||||||
|
// Mocking Kinesis creds
|
||||||
|
AWSCredentialsProvider provider = Mockito.mock(AWSCredentialsProvider.class);
|
||||||
|
Mockito.doReturn(Mockito.mock(AWSCredentials.class)).when(provider).getCredentials();
|
||||||
|
KinesisClientLibConfiguration configuration = new KinesisClientLibConfiguration( "Derp",
|
||||||
|
"Blurp",
|
||||||
|
provider,
|
||||||
|
"Worker");
|
||||||
|
|
||||||
|
MultiLangRecordProcessorFactory factory = Mockito.mock(MultiLangRecordProcessorFactory.class);
|
||||||
|
Mockito.doReturn(new String[] { "someExecutableName" }).when(factory).getCommandArray();
|
||||||
|
MultiLangDaemon daemon =
|
||||||
|
new MultiLangDaemon(configuration, factory, Executors.newCachedThreadPool());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void usageTest() {
|
||||||
|
PrintStream printStream = Mockito.mock(PrintStream.class);
|
||||||
|
|
||||||
|
String message = "Everything blew up";
|
||||||
|
|
||||||
|
MultiLangDaemon.printUsage(printStream, message);
|
||||||
|
Mockito.verify(printStream, Mockito.times(1)).println(Mockito.contains(message));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,155 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Licensed under the Amazon Software License (the "License").
|
||||||
|
* You may not use this file except in compliance with the License.
|
||||||
|
* A copy of the License is located at
|
||||||
|
*
|
||||||
|
* http://aws.amazon.com/asl/
|
||||||
|
*
|
||||||
|
* or in the "license" file accompanying this file. This file is distributed
|
||||||
|
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||||
|
* express or implied. See the License for the specific language governing
|
||||||
|
* permissions and limitations under the License.
|
||||||
|
*/
|
||||||
|
package com.amazonaws.services.kinesis.multilang;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
|
import org.junit.Assert;
|
||||||
|
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
import org.mockito.invocation.InvocationOnMock;
|
||||||
|
import org.mockito.stubbing.Answer;
|
||||||
|
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
|
||||||
|
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
|
||||||
|
import com.amazonaws.services.kinesis.model.Record;
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.CheckpointMessage;
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.Message;
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage;
|
||||||
|
import com.amazonaws.services.kinesis.multilang.messages.StatusMessage;
|
||||||
|
|
||||||
|
public class MultiLangProtocolTest {
|
||||||
|
|
||||||
|
private MultiLangProtocol protocol;
|
||||||
|
private MessageWriter messageWriter;
|
||||||
|
private MessageReader messageReader;
|
||||||
|
private String shardId;
|
||||||
|
private IRecordProcessorCheckpointer checkpointer;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() {
|
||||||
|
this.shardId = "shard-id-123";
|
||||||
|
messageWriter = Mockito.mock(MessageWriter.class);
|
||||||
|
messageReader = Mockito.mock(MessageReader.class);
|
||||||
|
protocol = new MultiLangProtocol(messageReader, messageWriter, shardId);
|
||||||
|
checkpointer = Mockito.mock(IRecordProcessorCheckpointer.class);
|
||||||
|
}
|
||||||
|
|
||||||
|
private Future<Boolean> buildBooleanFuture(boolean val) throws InterruptedException, ExecutionException {
|
||||||
|
Future<Boolean> successFuture = Mockito.mock(Future.class);
|
||||||
|
Mockito.doReturn(val).when(successFuture).get();
|
||||||
|
return successFuture;
|
||||||
|
}
|
||||||
|
|
||||||
|
private Future<Message> buildMessageFuture(Message message) throws InterruptedException, ExecutionException {
|
||||||
|
Future<Message> messageFuture = Mockito.mock(Future.class);
|
||||||
|
Mockito.doReturn(message).when(messageFuture).get();
|
||||||
|
return messageFuture;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void initializeTest() throws InterruptedException, ExecutionException {
|
||||||
|
Mockito.doReturn(buildBooleanFuture(true)).when(messageWriter).writeInitializeMessage(shardId);
|
||||||
|
Mockito.doReturn(buildMessageFuture(new StatusMessage("initialize"))).when(messageReader).getNextMessageFromSTDOUT();
|
||||||
|
Assert.assertTrue(protocol.initialize());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void processRecordsTest() throws InterruptedException, ExecutionException {
|
||||||
|
Mockito.doReturn(buildBooleanFuture(true)).when(messageWriter).writeProcessRecordsMessage(Mockito.anyList());
|
||||||
|
Mockito.doReturn(buildMessageFuture(new StatusMessage("processRecords"))).when(messageReader).getNextMessageFromSTDOUT();
|
||||||
|
Assert.assertTrue(protocol.processRecords(new ArrayList<Record>(), null));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void shutdownTest() throws InterruptedException, ExecutionException {
|
||||||
|
Mockito.doReturn(buildBooleanFuture(true)).when(messageWriter)
|
||||||
|
.writeShutdownMessage(Mockito.any(ShutdownReason.class));
|
||||||
|
Mockito.doReturn(buildMessageFuture(new StatusMessage("shutdown"))).when(messageReader).getNextMessageFromSTDOUT();
|
||||||
|
Assert.assertTrue(protocol.shutdown(null, ShutdownReason.ZOMBIE));
|
||||||
|
}
|
||||||
|
|
||||||
|
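// Builds a Mockito Answer that replays the given messages in order, repeating the last
// message once the list is exhausted.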
private Answer<Future<Message>> buildMessageAnswers(List<Message> messages) {
|
||||||
|
return new Answer<Future<Message>>() {
|
||||||
|
|
||||||
|
Iterator<Message> messageIterator;
|
||||||
|
Message message;
|
||||||
|
|
||||||
|
Answer<Future<Message>> init(List<Message> messages) {
|
||||||
|
messageIterator = messages.iterator();
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Future<Message> answer(InvocationOnMock invocation) throws Throwable {
|
||||||
|
if (this.messageIterator.hasNext()) {
|
||||||
|
message = this.messageIterator.next();
|
||||||
|
}
|
||||||
|
return buildMessageFuture(message);
|
||||||
|
}
|
||||||
|
|
||||||
|
}.init(messages);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void processRecordsWithCheckpointsTest() throws InterruptedException, ExecutionException,
|
||||||
|
KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
|
||||||
|
|
||||||
|
Mockito.doReturn(buildBooleanFuture(true)).when(messageWriter).writeProcessRecordsMessage(Mockito.anyList());
|
||||||
|
Mockito.doReturn(buildBooleanFuture(true)).when(messageWriter)
|
||||||
|
.writeCheckpointMessageWithError(Mockito.anyString(), Mockito.any(Throwable.class));
|
||||||
|
Mockito.doAnswer(buildMessageAnswers(new ArrayList<Message>() {
|
||||||
|
{
|
||||||
|
this.add(new CheckpointMessage("123", null));
|
||||||
|
this.add(new CheckpointMessage(null, null));
|
||||||
|
/*
|
||||||
|
* This processRecords message will be ignored by the read loop, which only cares about status and
|
||||||
|
* checkpoint messages. All other lines and message types are ignored. By inserting it here, we check
|
||||||
|
* that this test succeeds even with unexpected messaging.
|
||||||
|
*/
|
||||||
|
this.add(new ProcessRecordsMessage());
|
||||||
|
this.add(new StatusMessage("processRecords"));
|
||||||
|
}
|
||||||
|
})).when(messageReader).getNextMessageFromSTDOUT();
|
||||||
|
Assert.assertTrue(protocol.processRecords(new ArrayList<Record>(), checkpointer));
|
||||||
|
|
||||||
|
Mockito.verify(checkpointer, Mockito.timeout(1)).checkpoint();
|
||||||
|
Mockito.verify(checkpointer, Mockito.timeout(1)).checkpoint("123");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void processRecordsWithABadCheckpointTest() throws InterruptedException, ExecutionException {
|
||||||
|
Mockito.doReturn(buildBooleanFuture(true)).when(messageWriter).writeProcessRecordsMessage(Mockito.anyList());
|
||||||
|
Mockito.doReturn(buildBooleanFuture(false)).when(messageWriter)
|
||||||
|
.writeCheckpointMessageWithError(Mockito.anyString(), Mockito.any(Throwable.class));
|
||||||
|
Mockito.doAnswer(buildMessageAnswers(new ArrayList<Message>() {
|
||||||
|
{
|
||||||
|
this.add(new CheckpointMessage("456", null));
|
||||||
|
this.add(new StatusMessage("processRecords"));
|
||||||
|
}
|
||||||
|
})).when(messageReader).getNextMessageFromSTDOUT();
|
||||||
|
Assert.assertFalse(protocol.processRecords(new ArrayList<Record>(), checkpointer));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,90 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.multilang;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;

public class ReadSTDERRTaskTest {

    private static final String shardId = "shard-123";
    private BufferedReader mockBufferReader;

    @Before
    public void setup() {
        mockBufferReader = Mockito.mock(BufferedReader.class);
    }

    @Test
    public void errorReaderBuilderTest() {
        String errorMessages = "OMG\nThis is test message\n blah blah blah \n";
        InputStream stream = new ByteArrayInputStream(errorMessages.getBytes());
        LineReaderTask<Boolean> reader = new DrainChildSTDERRTask().initialize(stream, shardId, "");
        Assert.assertNotNull(reader);
    }

    @Test
    public void runTest() throws Exception {
        String errorMessages = "OMG\nThis is test message\n blah blah blah \n";
        BufferedReader bufferReader =
                new BufferedReader(new InputStreamReader(new ByteArrayInputStream(errorMessages.getBytes())));
        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(bufferReader, shardId, "");
        Assert.assertNotNull(errorReader);

        Boolean result = errorReader.call();
        Assert.assertTrue(result);
    }

    private void runErrorTest(Exception exceptionToThrow) {
        try {
            Mockito.doThrow(exceptionToThrow).when(mockBufferReader).readLine();
        } catch (IOException e) {
            Assert.fail("Not supposed to get an exception when we're just building our mock.");
        }
        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, shardId, "");
        Assert.assertNotNull(errorReader);
        Future<Boolean> result = Executors.newCachedThreadPool().submit(errorReader);
        Boolean finishedCleanly = null;
        try {
            finishedCleanly = result.get();
        } catch (InterruptedException | ExecutionException e) {
            Assert.fail("Should have been able to get a result. The error should be handled during the call and result in false.");
        }
        Assert.assertFalse("Reading a line should have thrown an exception", finishedCleanly);
    }

    @Test
    public void runCausesIOErrorTest() {
        runErrorTest(new IOException());
    }

    @Test
    public void runCausesUnExpectedErrorTest() throws IOException {
        Mockito.doThrow(IOException.class).when(this.mockBufferReader).close();
        runErrorTest(new IOException());
    }
}
@@ -0,0 +1,32 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.multilang;

import org.junit.Assert;
import org.junit.Test;

import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;

public class StreamingRecordProcessorFactoryTest {

    @Test
    public void createProcessorTest() {
        MultiLangRecordProcessorFactory factory = new MultiLangRecordProcessorFactory("somecommand", null);
        IRecordProcessor processor = factory.createProcessor();

        Assert.assertEquals("Should have constructed a StreamingRecordProcessor", MultiLangRecordProcessor.class,
                processor.getClass());
    }
}
@@ -0,0 +1,219 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.multilang;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.multilang.messages.InitializeMessage;
import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage;
import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage;
import com.amazonaws.services.kinesis.multilang.messages.StatusMessage;
import com.fasterxml.jackson.databind.ObjectMapper;

public class StreamingRecordProcessorTest {

    private static final String shardId = "shard-123";

    private int systemExitCount = 0;

    private IRecordProcessorCheckpointer unimplementedCheckpointer = new IRecordProcessorCheckpointer() {

        @Override
        public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException,
                ThrottlingException, ShutdownException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void checkpoint(String sequenceNumber) throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void checkpoint(Record record) throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void checkpoint(String sequenceNumber, long subSequenceNumber)
                throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException,
                ShutdownException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }
    };

    private MessageWriter messageWriter;

    private DrainChildSTDERRTask errorReader;

    private MessageReader messageReader;

    private MultiLangRecordProcessor recordProcessor;

    @Before
    public void prepare() throws IOException, InterruptedException, ExecutionException {
        // Fake command
        String command = "derp";
        systemExitCount = 0;

        // Mocks
        ExecutorService executor = Executors.newFixedThreadPool(3);
        final Process process = Mockito.mock(Process.class);

        messageWriter = Mockito.mock(MessageWriter.class);
        messageReader = Mockito.mock(MessageReader.class);
        errorReader = Mockito.mock(DrainChildSTDERRTask.class);

        recordProcessor =
                new MultiLangRecordProcessor(new ProcessBuilder(), executor, new ObjectMapper(), messageWriter,
                        messageReader, errorReader) {

                    // Just don't do anything when we exit.
                    void exit() {
                        systemExitCount += 1;
                    }

                    // Inject our mock process
                    Process startProcess() {
                        return process;
                    }
                };

        // Our process will return mock streams
        InputStream inputStream = Mockito.mock(InputStream.class);
        InputStream errorStream = Mockito.mock(InputStream.class);
        OutputStream outputStream = Mockito.mock(OutputStream.class);
        Mockito.doReturn(inputStream).when(process).getInputStream();
        Mockito.doReturn(errorStream).when(process).getErrorStream();
        Mockito.doReturn(outputStream).when(process).getOutputStream();

        Mockito.doReturn(Mockito.mock(Future.class)).when(messageReader).drainSTDOUT();
        Future<Boolean> trueFuture = Mockito.mock(Future.class);
        Mockito.doReturn(true).when(trueFuture).get();

        Mockito.doReturn(trueFuture).when(messageWriter).writeInitializeMessage(Mockito.anyString());
        Mockito.doReturn(trueFuture).when(messageWriter)
                .writeCheckpointMessageWithError(Mockito.anyString(), Mockito.any(Throwable.class));
        Mockito.doReturn(trueFuture).when(messageWriter).writeProcessRecordsMessage(Mockito.anyList());
        Mockito.doReturn(trueFuture).when(messageWriter).writeShutdownMessage(Mockito.any(ShutdownReason.class));
    }

    private void phases(Answer<StatusMessage> answer) throws InterruptedException, ExecutionException {
        /*
         * Return a status message for each call. The plan is:
         * initialize
         * processRecords
         * processRecords
         * shutdown
         */
        Future<StatusMessage> future = Mockito.mock(Future.class);
        Mockito.doAnswer(answer).when(future).get();
        Mockito.doReturn(future).when(messageReader).getNextMessageFromSTDOUT();

        List<Record> testRecords = new ArrayList<Record>();

        recordProcessor.initialize(shardId);
        recordProcessor.processRecords(testRecords, unimplementedCheckpointer);
        recordProcessor.processRecords(testRecords, unimplementedCheckpointer);
        recordProcessor.shutdown(unimplementedCheckpointer, ShutdownReason.ZOMBIE);
    }

    @Test
    public void processorPhasesTest() throws InterruptedException, ExecutionException {

        Answer<StatusMessage> answer = new Answer<StatusMessage>() {

            StatusMessage[] answers = new StatusMessage[] { new StatusMessage(InitializeMessage.ACTION),
                    new StatusMessage(ProcessRecordsMessage.ACTION), new StatusMessage(ProcessRecordsMessage.ACTION),
                    new StatusMessage(ShutdownMessage.ACTION) };

            int callCount = 0;

            @Override
            public StatusMessage answer(InvocationOnMock invocation) throws Throwable {
                if (callCount < answers.length) {
                    return answers[callCount++];
                } else {
                    throw new Throwable("Too many calls to getNextStatusMessage");
                }
            }
        };

        phases(answer);

        Mockito.verify(messageWriter, Mockito.times(1)).writeInitializeMessage(shardId);
        Mockito.verify(messageWriter, Mockito.times(2)).writeProcessRecordsMessage(Mockito.anyList());
        Mockito.verify(messageWriter, Mockito.times(1)).writeShutdownMessage(ShutdownReason.ZOMBIE);
    }

    @Test
    public void initFailsTest() throws InterruptedException, ExecutionException {
        Answer<StatusMessage> answer = new Answer<StatusMessage>() {

            /*
             * This bad message will cause shutdown not to attempt to send a message, i.e. it avoids encountering an
             * exception.
             */
            StatusMessage[] answers = new StatusMessage[] { new StatusMessage("Bad"),
                    new StatusMessage(ProcessRecordsMessage.ACTION), new StatusMessage(ProcessRecordsMessage.ACTION),
                    new StatusMessage(ShutdownMessage.ACTION) };

            int callCount = 0;

            @Override
            public StatusMessage answer(InvocationOnMock invocation) throws Throwable {
                if (callCount < answers.length) {
                    return answers[callCount++];
                } else {
                    throw new Throwable("Too many calls to getNextStatusMessage");
                }
            }
        };

        phases(answer);

        Mockito.verify(messageWriter, Mockito.times(1)).writeInitializeMessage(shardId);
        Mockito.verify(messageWriter, Mockito.times(2)).writeProcessRecordsMessage(Mockito.anyList());
        Mockito.verify(messageWriter, Mockito.times(0)).writeShutdownMessage(ShutdownReason.ZOMBIE);
        Assert.assertEquals(1, systemExitCount);
    }
}
@@ -0,0 +1,76 @@
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.multilang.messages;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;

import org.junit.Assert;
import org.junit.Test;

import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason;
import com.amazonaws.services.kinesis.model.Record;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MessageTest {

    @Test
    public void toStringTest() {
        Message[] messages =
                new Message[] { new CheckpointMessage("1234567890", null), new InitializeMessage("shard-123"),
                        new ProcessRecordsMessage(new ArrayList<Record>() {
                            {
                                this.add(new Record() {
                                    {
                                        this.withData(ByteBuffer.wrap("cat".getBytes()));
                                        this.withPartitionKey("cat");
                                        this.withSequenceNumber("555");
                                    }
                                });
                            }
                        }), new ShutdownMessage(ShutdownReason.ZOMBIE), new StatusMessage("processRecords"),
                        new InitializeMessage(), new ProcessRecordsMessage() };

        for (int i = 0; i < messages.length; i++) {
            Assert.assertTrue("Each message should contain the action field", messages[i].toString().contains("action"));
        }

        // Hit this constructor
        JsonFriendlyRecord defaultJsonFriendlyRecord = new JsonFriendlyRecord();
        Assert.assertNull(defaultJsonFriendlyRecord.getPartitionKey());
        Assert.assertNull(defaultJsonFriendlyRecord.getData());
        Assert.assertNull(defaultJsonFriendlyRecord.getSequenceNumber());
        Assert.assertNull(new ShutdownMessage(null).getReason());

        // Hit the bad object mapping path
        Message withBadMapper = new Message() {
        }.withObjectMapper(new ObjectMapper() {

            private static final long serialVersionUID = 1L;

            @Override
            public String writeValueAsString(Object m) throws JsonProcessingException {
                throw new JsonProcessingException(new Throwable()) {
                };
            }
        });
        String s = withBadMapper.toString();
        Assert.assertNotNull(s);
    }
}
8
src/test/java/log4j.properties
Normal file

@@ -0,0 +1,8 @@
log4j.rootLogger=INFO, A1
log4j.appender.A1=org.apache.log4j.ConsoleAppender
log4j.appender.A1.layout=org.apache.log4j.PatternLayout

# Print the date in ISO 8601 format
log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n

log4j.logger.org.apache.http=WARN