diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 00000000..add91b85
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1 @@
+63ff312818a5f70eab9ec5bf80b53bdd7bf80248
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index cad9988a..fd4483ad 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -2,12 +2,18 @@ version: 2
 updates:
   - package-ecosystem: "maven"
     directory: "/"
+    labels:
+      - "dependencies"
+      - "v2.x"
     schedule:
       interval: "weekly"
 
 # branch - v1.x
   - package-ecosystem: "maven"
     directory: "/"
+    labels:
+      - "dependencies"
+      - "v1.x"
     target-branch: "v1.x"
     schedule:
       interval: "weekly"
diff --git a/.github/scripts/backwards_compatibility_check.sh b/.github/scripts/backwards_compatibility_check.sh
new file mode 100755
index 00000000..78f95cc6
--- /dev/null
+++ b/.github/scripts/backwards_compatibility_check.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+TRUE=1
+FALSE=0
+KCL_MAVEN_DIR=~/.m2/repository/software/amazon/kinesis/amazon-kinesis-client
+
+REMOVED_METHODS_FLAG=$FALSE
+LATEST_VERSION=""
+LATEST_JAR=""
+CURRENT_VERSION=""
+CURRENT_JAR=""
+
+# Get the JAR from the latest version release on Maven.
+get_latest_jar() {
+    # clear the directory so that the latest release will be the only version in the Maven directory after running mvn dependency:get
+    rm -rf "$KCL_MAVEN_DIR"
+    mvn -B dependency:get -Dartifact=software.amazon.kinesis:amazon-kinesis-client:LATEST
+    LATEST_VERSION=$(ls "$KCL_MAVEN_DIR" | grep -E '[0-9]+.[0-9]+.[0-9]+')
+    LATEST_JAR=$KCL_MAVEN_DIR/$LATEST_VERSION/amazon-kinesis-client-$LATEST_VERSION.jar
+}
+
+# Get the JAR with the changes that need to be verified.
+get_current_jar() {
+    mvn -B install -Dmaven.test.skip=true
+    CURRENT_VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+    CURRENT_JAR=$KCL_MAVEN_DIR/$CURRENT_VERSION/amazon-kinesis-client-$CURRENT_VERSION.jar
+}
+
+is_new_minor_release() {
+    local latest_minor_version=$(echo "$LATEST_VERSION" | cut -d . -f 2)
+    local current_minor_version=$(echo "$CURRENT_VERSION" | cut -d . -f 2)
+    [[ "$latest_minor_version" != "$current_minor_version" ]]
+    return $?
+}
+
+# Skip classes with the KinesisClientInternalApi annotation. These classes are subject to breaking backwards compatibility.
+is_kinesis_client_internal_api() {
+    local current_class="$1"
+    local grep_internal_api_result=$(javap -v -classpath "$LATEST_JAR" "$current_class" | grep KinesisClientInternalApi)
+    [[ "$grep_internal_api_result" != "" ]]
+    return $?
+}
+
+# Skip classes which are not public (e.g. package level). These classes will not break backwards compatibility.
+is_non_public_class() {
+    local current_class="$1"
+    local class_definition=$(javap -classpath "$LATEST_JAR" "$current_class" | head -2 | tail -1)
+    [[ "$class_definition" != *"public"* ]]
+    return $?
+}
+
+# Ignore methods that change from abstract to non-abstract (and vice versa) if the class is an interface.
+ignore_abstract_changes_in_interfaces() {
+    local current_class="$1"
+    local class_definition=$(javap -classpath "$LATEST_JAR" "$current_class" | head -2 | tail -1)
+    if [[ $class_definition == *"interface"* ]]
+    then
+        LATEST_METHODS=${LATEST_METHODS// abstract / }
+        CURRENT_METHODS=${CURRENT_METHODS// abstract / }
+    fi
+}
+
+# Checks if there are any methods in the latest version that were removed in the current version.
+find_removed_methods() {
+    echo "Checking if methods in current version (v$CURRENT_VERSION) were removed from latest version (v$LATEST_VERSION)"
+    if is_new_minor_release
+    then
+        echo "New minor release is being performed. Ignoring changes in classes marked with @KinesisClientInternalApi annotation."
+    fi
+    local latest_classes=$(
+        jar tf $LATEST_JAR |
+        grep .class |
+        tr / . |
+        sed 's/\.class$//' |
+        # skip generated proto classes since these have a lot of inherited methods
+        # that are not outputted by javap. besides, generated java code is not a
+        # good indicator of proto compatibility- it will not capture reserved
+        # tags or deprecated fields.
+        grep -v 'software\.amazon\.kinesis\.retrieval\.kpl\.Messages')
+    for class in $latest_classes
+    do
+        if (is_kinesis_client_internal_api "$class" && is_new_minor_release) || is_non_public_class "$class"
+        then
+            continue
+        fi
+
+        LATEST_METHODS=$(javap -classpath "$LATEST_JAR" "$class")
+        CURRENT_METHODS=$(javap -classpath "$CURRENT_JAR" "$class")
+
+        ignore_abstract_changes_in_interfaces "$class"
+
+        local removed_methods=$(diff <(echo "$LATEST_METHODS") <(echo "$CURRENT_METHODS") | grep '^<')
+
+        # ignore synthetic access methods - these are not available to users and will not break backwards compatibility
+        removed_methods=$(echo "$removed_methods" | grep -v "access\$[0-9]\+")
+
+        if [[ "$removed_methods" != "" ]]
+        then
+            REMOVED_METHODS_FLAG=$TRUE
+            if is_kinesis_client_internal_api "$class"
+            then
+                echo "Found removed methods in class with @KinesisClientInternalApi annotation. To resolve these issues, upgrade the current minor version or address these changes."
+            fi
+            echo "$class does not have method(s):"
+            echo "$removed_methods"
+        fi
+    done
+}
+
+get_backwards_compatible_result() {
+    if [[ $REMOVED_METHODS_FLAG == $TRUE ]]
+    then
+        echo "Current KCL version $CURRENT_VERSION is not backwards compatible with version $LATEST_VERSION. See output above for removed packages/methods."
+        exit 1
+    else
+        echo "Current KCL version $CURRENT_VERSION is backwards compatible with version $LATEST_VERSION."
+        exit 0
+    fi
+}
+
+main() {
+    get_latest_jar
+    get_current_jar
+    find_removed_methods
+    get_backwards_compatible_result
+}
+
+main
diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml
new file mode 100644
index 00000000..81fca35b
--- /dev/null
+++ b/.github/workflows/maven.yml
@@ -0,0 +1,46 @@
+# This workflow will build a Java project with Maven, and cache/restore any dependencies to improve the workflow execution time
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-java-with-maven
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: Java CI with Maven
+
+on:
+  push:
+    branches:
+      - "master"
+  pull_request:
+    branches:
+      - "master"
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v4
+    - name: Set up JDK 8
+      uses: actions/setup-java@v4
+      with:
+        java-version: '8'
+        distribution: 'corretto'
+    - name: Build with Maven
+      run: mvn -B package --file pom.xml -DskipITs
+
+  backwards-compatible-check:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v4
+    - name: Set up JDK 8
+      uses: actions/setup-java@v4
+      with:
+        java-version: '8'
+        distribution: 'corretto'
+    - name: Check backwards compatibility of changes
+      run: .github/scripts/backwards_compatibility_check.sh
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 7fd632f3..c4d508a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@ target/
 AwsCredentials.properties
 .idea
 *.iml
-.DS_Store
\ No newline at end of file
+*.swp
+.DS_Store
diff --git a/.log.swp b/.log.swp
deleted file mode 100644
index bdb60bb3..00000000
Binary files a/.log.swp and /dev/null differ
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 33391211..0273992e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,136 @@
 For **1.x** release notes, please see [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md)
 
 ---
 
+### Release 2.6.0 (2024-05-01)
+* [#1317](https://github.com/awslabs/amazon-kinesis-client/pull/1317) Add enablePriorityLeaseAssignment config
+* [#1320](https://github.com/awslabs/amazon-kinesis-client/pull/1320) Update lease taker to get unassigned leases
+* [#1318](https://github.com/awslabs/amazon-kinesis-client/pull/1318) Internally construct and use stream ARNs for all streams in multi-stream mode
+* [#1291](https://github.com/awslabs/amazon-kinesis-client/pull/1291) Update RetrievalFactory implementations to utilize the StreamIdentifier field of StreamConfig
+* [#1308](https://github.com/awslabs/amazon-kinesis-client/pull/1308) Move shutdownComplete call to ShardConsumer
+* [#1313](https://github.com/awslabs/amazon-kinesis-client/pull/1313) Add additional integration tests for multi-stream and cross account access
+* [#1273](https://github.com/awslabs/amazon-kinesis-client/pull/1273) Optimize currentStreamConfigMap by cleaning up lingering streams
+* [#1302](https://github.com/awslabs/amazon-kinesis-client/pull/1302) Fix gracefulShutdown behavior in Scheduler
+
+### Release 2.5.8 (2024-03-27)
+* [#1278](https://github.com/awslabs/amazon-kinesis-client/pull/1278) Upgrade awssdk.version from 2.25.3 to 2.25.11
+* [#1279](https://github.com/awslabs/amazon-kinesis-client/pull/1279) Upgrade org.apache.maven.plugins:maven-gpg-plugin from 3.1.0 to 3.2.1
+* [#1280](https://github.com/awslabs/amazon-kinesis-client/pull/1280) Upgrade org.apache.commons:commons-lang3 from 3.12.0 to 3.14.0
+* [#1282](https://github.com/awslabs/amazon-kinesis-client/pull/1282) Upgrade org.apache.maven.plugins:maven-javadoc-plugin from 3.5.0 to 3.6.3
+* [#1277](https://github.com/awslabs/amazon-kinesis-client/pull/1277) Reuse 'ShardSyncTaskManager' instance for existing stream to avoid duplicate enqueue of 'ShardSyncTask'
+
+### Release 2.5.7 (2024-03-19)
+* [#1275](https://github.com/awslabs/amazon-kinesis-client/pull/1275) Update PollingConfig maxRecords method to return PollingConfig
+* [#1236](https://github.com/awslabs/amazon-kinesis-client/pull/1236) Upgrade commons-io:commons-io from 2.11.0 to 2.15.1
+* [#1189](https://github.com/awslabs/amazon-kinesis-client/pull/1189) Upgrade org.apache.maven.plugins:maven-resources-plugin from 3.3.0 to 3.3.1
+* [#1139](https://github.com/awslabs/amazon-kinesis-client/pull/1139) Upgrade maven-surefire-plugin from 2.22.2 to 3.1.2
+* [#1138](https://github.com/awslabs/amazon-kinesis-client/pull/1138) Upgrade maven-failsafe-plugin from 2.22.2 to 3.1.2
+* [#1125](https://github.com/awslabs/amazon-kinesis-client/pull/1125) Upgrade maven-gpg-plugin from 3.0.1 to 3.1.0
+
+### Release 2.5.6 (2024-03-08)
+* [#1271](https://github.com/awslabs/amazon-kinesis-client/pull/1271) Adding snapshot for 2.5.6-SNAPSHOT
+* [#1268](https://github.com/awslabs/amazon-kinesis-client/pull/1268) Upgrade ch.qos.logback:logback-classic dependency from 1.3.12 to 1.3.14
+* [#1268](https://github.com/awslabs/amazon-kinesis-client/pull/1268) Upgrade awssdk.version from 2.20.43 to 2.25.3
+* [#1268](https://github.com/awslabs/amazon-kinesis-client/pull/1268) Upgrade aws-java-sdk.version from 1.12.405 to 1.12.668
+* [#1268](https://github.com/awslabs/amazon-kinesis-client/pull/1268) Upgrade gsr.version from 1.1.17 to 1.1.19
+
+### Release 2.5.5 (2024-02-22)
+* [#1257](https://github.com/awslabs/amazon-kinesis-client/pull/1257) Prevent improper error logging during worker shutdown
+* [#1260](https://github.com/awslabs/amazon-kinesis-client/pull/1260) Add Deletion protection config
+* [#1258](https://github.com/awslabs/amazon-kinesis-client/pull/1258) Fix issue in configuring metricsEnabledDimensions
+* [#1259](https://github.com/awslabs/amazon-kinesis-client/pull/1259) Add snapshot to version
+
+### Release 2.5.4 (December 12, 2023)
+* [#1232](https://github.com/awslabs/amazon-kinesis-client/pull/1232) Upgrade ch.qos.logback:logback-classic dependency from 1.3.0 to 1.3.12 in /amazon-kinesis-client
+* [#1233](https://github.com/awslabs/amazon-kinesis-client/pull/1233) Upgrade ch.qos.logback:logback-classic dependency from 1.3.0 to 1.3.12 in /amazon-kinesis-client-multilang
+* [#1230](https://github.com/awslabs/amazon-kinesis-client/pull/1230) Bug fix which now allows MultiLangDaemon to configure idleTimeBetweenReadsInMillis
+* [#1229](https://github.com/awslabs/amazon-kinesis-client/pull/1229) Added link to `javadoc.io`-hosted Javadoc in the README
+* [#1218](https://github.com/awslabs/amazon-kinesis-client/pull/1218) Added doc for leases and the lease lifecycle to help explain lease lifecycle logic.
+* [#1226](https://github.com/awslabs/amazon-kinesis-client/pull/1226) Upgraded KCL from 2.5.3 to 2.5.4-SNAPSHOT
+
+### Release 2.5.3 (November 8, 2023)
+* [#1219](https://github.com/awslabs/amazon-kinesis-client/pull/1219) Provided streamArn in getRecords request
+* [#1216](https://github.com/awslabs/amazon-kinesis-client/pull/1216) Updated AWS Glue Schema Registry from version 1.1.14 to 1.1.17.
+* [#1205](https://github.com/awslabs/amazon-kinesis-client/pull/1205) Updated the FAQ with impact of changing default checkpoint.
+* [#1203](https://github.com/awslabs/amazon-kinesis-client/pull/1203) Added links from README.md to FAQ and doc folder.
+* [#1202](https://github.com/awslabs/amazon-kinesis-client/pull/1202) Introduced a FAQ for Kinesis Client Library
+* [#1200](https://github.com/awslabs/amazon-kinesis-client/pull/1200) Added test case for StreamIdentifier serialization.
+
+### Release 2.5.2 (August 7, 2023)
+* [#1184](https://github.com/awslabs/amazon-kinesis-client/pull/1184) [#367] Enhanced multi-lang `AWSCredentialsProvider=...` decoder and c…
+* [#1186](https://github.com/awslabs/amazon-kinesis-client/pull/1186) Provided documentation for multilang's new NestedPropertyKey enhancement.
+* [#1181](https://github.com/awslabs/amazon-kinesis-client/pull/1181) CVE-2023-2976: Upgrade Google Guava dependency version from `32.0.0-jre` to `32.1.1-jre`
+* [#1159](https://github.com/awslabs/amazon-kinesis-client/pull/1159) Bug fix in lease refresher integration test with occasional failures
+* [#1157](https://github.com/awslabs/amazon-kinesis-client/pull/1157) Fix NPE on graceful shutdown before DDB `LeaseCoordinator` starts.
+* [#1152](https://github.com/awslabs/amazon-kinesis-client/pull/1152) Adding resharding integration tests and changing ITs to not run by default
+* [#1162](https://github.com/awslabs/amazon-kinesis-client/pull/1162) Only deleting resource created by ITs
+* [#1158](https://github.com/awslabs/amazon-kinesis-client/pull/1158) Checkstyle: tightened `LineLength` restriction from 170 to 150.
+* [#1151](https://github.com/awslabs/amazon-kinesis-client/pull/1151) Modified `dependabot.yml` to set the correct `v[1|2].x` label.
+* [#1164](https://github.com/awslabs/amazon-kinesis-client/pull/1164) Upgraded KCL Version from 2.5.1 to 2.5.2-SNAPSHOT
+
+### Release 2.5.1 (June 27, 2023)
+* [#1143](https://github.com/awslabs/amazon-kinesis-client/pull/1143) Upgrade MultiLangDaemon to support StreamARN
+* [#1145](https://github.com/awslabs/amazon-kinesis-client/pull/1145) Introduced GitHub actions to trigger Maven builds during merge/pull requests
+* [#1136](https://github.com/awslabs/amazon-kinesis-client/pull/1136) Added testing architecture and KCL 2.x basic polling/streaming tests
+* [#1153](https://github.com/awslabs/amazon-kinesis-client/pull/1153) Checkstyle: added `UnusedImports` check.
+* [#1150](https://github.com/awslabs/amazon-kinesis-client/pull/1150) Enabled Checkstyle validation of test resources.
+* [#1149](https://github.com/awslabs/amazon-kinesis-client/pull/1149) Bound Checkstyle to `validate` goal for automated enforcement.
+* [#1148](https://github.com/awslabs/amazon-kinesis-client/pull/1148) Code cleanup to facilitate Checkstyle enforcement.
+* [#1142](https://github.com/awslabs/amazon-kinesis-client/pull/1142) Upgrade Google Guava dependency version from 31.1-jre to 32.0.0-jre
+* [#1115](https://github.com/awslabs/amazon-kinesis-client/pull/1115) Update KCL version from 2.5.0 to 2.5.1-SNAPSHOT
+
+### Release 2.5.0 (May 19, 2023)
+* **[#1109](https://github.com/awslabs/amazon-kinesis-client/pull/1109) Add support for stream ARNs**
+* **[#1065](https://github.com/awslabs/amazon-kinesis-client/pull/1065) Allow tags to be added when lease table is created**
+* [#1094](https://github.com/awslabs/amazon-kinesis-client/pull/1094) Code cleanup to introduce better testing
+* [#1088](https://github.com/awslabs/amazon-kinesis-client/pull/1088) Minimize race in PSSM to optimize shard sync calls
+* [#1086](https://github.com/awslabs/amazon-kinesis-client/pull/1086) Add additional SingleStreamTracker constructor with stream position parameter
+* [#1084](https://github.com/awslabs/amazon-kinesis-client/pull/1084) More consistent testing behavior with restartAfterRequestTimerExpires
+* [#1066](https://github.com/awslabs/amazon-kinesis-client/pull/1066) More consistent testing behavior with HashRangesAreAlwaysComplete
+* [#1072](https://github.com/awslabs/amazon-kinesis-client/pull/1072) Upgrade nexus-staging-maven-plugin from 1.6.8 to 1.6.13
+* [#1073](https://github.com/awslabs/amazon-kinesis-client/pull/1073) Upgrade slf4j-api from 2.0.6 to 2.0.7
+* [#1090](https://github.com/awslabs/amazon-kinesis-client/pull/1090) Upgrade awssdk.version from 2.20.8 to 2.20.43
+* [#1071](https://github.com/awslabs/amazon-kinesis-client/pull/1071) Upgrade maven-compiler-plugin from 3.8.1 to 3.11.0
+
+### Release 2.4.8 (March 21, 2023)
+* [#1080](https://github.com/awslabs/amazon-kinesis-client/pull/1080) Added metric in `ShutdownTask` for scenario when parent leases are missing.
+* [#1077](https://github.com/awslabs/amazon-kinesis-client/pull/1077) Reverted changes to pom property
+* [#1069](https://github.com/awslabs/amazon-kinesis-client/pull/1069) Fixed flaky InitializationWaitsWhenLeaseTableIsEmpty test
+
+
+### Release 2.4.7 (March 17, 2023)
+* **NOTE: Due to an issue during the release process, the 2.4.7 published artifacts are incomplete and non-viable. Please use 2.4.8 or later.**
+* [#1063](https://github.com/awslabs/amazon-kinesis-client/pull/1063) Allow leader to learn new leases upon re-election to avoid unnecessary shardSyncs
+* [#1060](https://github.com/awslabs/amazon-kinesis-client/pull/1060) Add new metric to be emitted on lease creation
+* [#1057](https://github.com/awslabs/amazon-kinesis-client/pull/1057) Added more logging in `Scheduler` w.r.t. `StreamConfig`s.
+* [#1059](https://github.com/awslabs/amazon-kinesis-client/pull/1059) DRY: simplification of `HierarchicalShardSyncerTest`.
+* [#1062](https://github.com/awslabs/amazon-kinesis-client/pull/1062) Fixed retry storm in `PrefetchRecordsPublisher`.
+* [#1061](https://github.com/awslabs/amazon-kinesis-client/pull/1061) Fixed NPE in `LeaseCleanupManager`.
+* [#1056](https://github.com/awslabs/amazon-kinesis-client/pull/1056) Clean up in-memory state of deleted kinesis stream in MultiStreamMode
+* [#1058](https://github.com/awslabs/amazon-kinesis-client/pull/1058) Documentation: added `<pre>` tags so fixed-format diagrams aren't garbled.
+* [#1053](https://github.com/awslabs/amazon-kinesis-client/pull/1053) Exposed convenience method of `ExtendedSequenceNumber#isSentinelCheckpoint()`
+* [#1043](https://github.com/awslabs/amazon-kinesis-client/pull/1043) Removed a `.swp` file, and updated `.gitignore`.
+* [#1047](https://github.com/awslabs/amazon-kinesis-client/pull/1047) Upgrade awssdk.version from 2.19.31 to 2.20.8
+* [#1046](https://github.com/awslabs/amazon-kinesis-client/pull/1046) Upgrade maven-javadoc-plugin from 3.3.1 to 3.5.0
+* [#1038](https://github.com/awslabs/amazon-kinesis-client/pull/1038) Upgrade gsr.version from 1.1.13 to 1.1.14
+* [#1037](https://github.com/awslabs/amazon-kinesis-client/pull/1037) Upgrade aws-java-sdk.version from 1.12.370 to 1.12.405
+
+### Release 2.4.6 (February 21, 2023)
+* [#1041](https://github.com/awslabs/amazon-kinesis-client/pull/1041) Minor optimizations (e.g., calculate-once, put instead of get+put)
+* [#1035](https://github.com/awslabs/amazon-kinesis-client/pull/1035) Release Note updates to avoid duplication and bitrot (e.g., 1.x release
+* [#935](https://github.com/awslabs/amazon-kinesis-client/pull/935) Pass isAtShardEnd correctly to processRecords call
+* [#1040](https://github.com/awslabs/amazon-kinesis-client/pull/1040) Increased logging verbosity around lease management
+* [#1024](https://github.com/awslabs/amazon-kinesis-client/pull/1024) Added logging w.r.t. StreamConfig handling.
+* [#1034](https://github.com/awslabs/amazon-kinesis-client/pull/1034) Optimization: 9~15% improvement in KinesisDataFetcher wall-time
+* [#1045](https://github.com/awslabs/amazon-kinesis-client/pull/1045) Fixed duplication of project version in children pom.xml
+* [#956](https://github.com/awslabs/amazon-kinesis-client/pull/956) Fixed warning message typos
+* [#795](https://github.com/awslabs/amazon-kinesis-client/pull/795) Fixed log message spacing
+* [#740](https://github.com/awslabs/amazon-kinesis-client/pull/740) Fixed typo in Comment
+* [#1028](https://github.com/awslabs/amazon-kinesis-client/pull/1028) Refactored MultiStreamTracker to provide and enhance OOP for both
+* [#1027](https://github.com/awslabs/amazon-kinesis-client/pull/1027) Removed CHECKSTYLE:OFF toggles which can invite/obscure sub-par code.
+* [#1032](https://github.com/awslabs/amazon-kinesis-client/pull/1032) Upgrade rxjava from 3.1.5 to 3.1.6
+* [#1030](https://github.com/awslabs/amazon-kinesis-client/pull/1030) Upgrade awssdk.version from 2.19.2 to 2.19.31
+* [#1029](https://github.com/awslabs/amazon-kinesis-client/pull/1029) Upgrade slf4j-api from 2.0.0 to 2.0.6
+* [#1015](https://github.com/awslabs/amazon-kinesis-client/pull/1015) Upgrade protobuf-java from 3.21.5 to 3.21.12
 
 ### Release 2.4.5 (January 04, 2023)
 * [#1014](https://github.com/awslabs/amazon-kinesis-client/pull/1014) Use AFTER_SEQUENCE_NUMBER iterator type for expired iterator request
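
> **Editor's note on the compatibility check above.** The new `backwards_compatibility_check.sh` boils down to: dump the public member signatures of every class in the last released JAR with `javap`, dump the same for the freshly built JAR, and flag any signature that disappeared. The following Java sketch illustrates the same idea with reflection over two classloaders instead of `javap`. It is a simplified illustration, not part of this change set: the JAR paths and the chosen class are hypothetical, and unlike the real script it does not handle transitive dependencies, `@KinesisClientInternalApi` exemptions, or interface `abstract` normalization.

```java
import java.io.File;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;

public class CompatCheck {
    /** Collects the public method signatures one classloader exposes for a class. */
    static Set<String> publicMethods(ClassLoader cl, String className) throws Exception {
        return Arrays.stream(cl.loadClass(className).getDeclaredMethods())
                .filter(m -> Modifier.isPublic(m.getModifiers()))
                .map(Method::toGenericString)
                .collect(Collectors.toCollection(TreeSet::new));
    }

    public static void main(String[] args) throws Exception {
        // Hypothetical JAR paths; the real script resolves them under ~/.m2.
        URLClassLoader latest = new URLClassLoader(new URL[] {new File("latest.jar").toURI().toURL()});
        URLClassLoader current = new URLClassLoader(new URL[] {new File("current.jar").toURI().toURL()});
        String cls = "software.amazon.kinesis.coordinator.Scheduler"; // illustrative class

        // Any signature present in the release but absent from the build is a breaking change.
        Set<String> removed = publicMethods(latest, cls);
        removed.removeAll(publicMethods(current, cls));
        removed.forEach(sig -> System.out.println(cls + " no longer has: " + sig));
        System.exit(removed.isEmpty() ? 0 : 1);
    }
}
```
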
diff --git a/README.md b/README.md
index 90853c1f..6737c4b1 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,9 @@ The **Amazon Kinesis Client Library for Java** (Amazon KCL) enables Java develop
 
 * [Kinesis Product Page][kinesis]
 * [Forum][kinesis-forum]
+* [Javadoc][kcl-javadoc]
+* [FAQ](docs/FAQ.md)
+* [KCL Documentation](docs/) (folder)
 * [Issues][kinesis-client-library-issues]
 
 ### Recommended Upgrade for All Users of the 1.x Amazon Kinesis Client
@@ -32,9 +35,19 @@ Please open an issue if you have any questions.
 ## Building from Source
 
 After you've downloaded the code from GitHub, you can build it using Maven. To disable GPG signing in the build, use
- this command: `mvn clean install -Dgpg.skip=true`. Note: This command runs Integration tests, which in turn creates AWS
-  resources (which requires manual cleanup). Integration tests require valid AWS credentials need to be discovered at
-   runtime. To skip running integration tests, add ` -DskipITs` option to the build command.  
+this command: `mvn clean install -Dgpg.skip=true`. 
+Note: This command does not run integration tests.
+
+To disable running unit tests in the build, add the property `-Dskip.ut=true`.
+
+## Running Integration Tests
+
+Note that running integration tests creates AWS resources.
+Integration tests require valid AWS credentials.
+This will look for a default AWS profile specified in your local `.aws/credentials`.
+To run all integration tests: `mvn verify -DskipITs=false`.
+To run one integration test, specify the integration test class: `mvn -Dit.test="BasicStreamConsumerIntegrationTest" -DskipITs=false verify`
+Optionally, you can provide the name of an IAM user/role to run tests with as a string using this command: `mvn -DskipITs=false -DawsProfile="" verify`.
 
 ## Integration with the Kinesis Producer Library
 For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**, the KCL integrates without additional effort. When the KCL retrieves an aggregated Amazon Kinesis record consisting of multiple KPL user records, it will automatically invoke the KPL to extract the individual user records before returning them to the user.
@@ -50,7 +63,7 @@ The recommended way to use the KCL for Java is to consume it from Maven.
   <dependency>
       <groupId>software.amazon.kinesis</groupId>
       <artifactId>amazon-kinesis-client</artifactId>
-      <version>2.4.4</version>
+      <version>2.6.0</version>
   </dependency>
   ```
 
@@ -71,10 +84,11 @@ The recommended way to use the KCL for Java is to consume it from Maven.
 | 2.x | [master/CHANGELOG.md](CHANGELOG.md) |
 | 1.x | [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md) |
 
-[kinesis]: http://aws.amazon.com/kinesis
-[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169
-[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues
 [docs-signup]: http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-setup.html
+[kcl-javadoc]: https://javadoc.io/doc/software.amazon.kinesis/amazon-kinesis-client/
+[kinesis]: http://aws.amazon.com/kinesis
+[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues
+[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169
 [kinesis-guide]: http://docs.aws.amazon.com/kinesis/latest/dev/introduction.html
 [kinesis-guide-begin]: http://docs.aws.amazon.com/kinesis/latest/dev/before-you-begin.html
 [kinesis-guide-create]: http://docs.aws.amazon.com/kinesis/latest/dev/step-one-create-stream.html
@@ -83,5 +97,5 @@ The recommended way to use the KCL for Java is to consume it from Maven.
 [kinesis-guide-kpl]: http://docs.aws.amazon.com//kinesis/latest/dev/developing-producers-with-kpl.html
 [kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html
 [kclpy]: https://github.com/awslabs/amazon-kinesis-client-python
-[multi-lang-protocol]: https://github.com/awslabs/amazon-kinesis-client/blob/master/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java
+[multi-lang-protocol]: /amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java
 [migration-guide]: https://docs.aws.amazon.com/streams/latest/dev/kcl-migration.html
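
> **Editor's note on the README changes above.** With the `amazon-kinesis-client` 2.6.0 dependency on the classpath, the smallest consumer-side surface is an implementation of the library's `ShardRecordProcessor`. A minimal no-op sketch is shown below for orientation; it is not the project's official sample, and a real application would still wire it into a `Scheduler` via `ConfigsBuilder` and checkpoint periodically in `processRecords`.

```java
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput;
import software.amazon.kinesis.processor.ShardRecordProcessor;

public class NoOpRecordProcessor implements ShardRecordProcessor {
    @Override
    public void initialize(InitializationInput initializationInput) {}

    @Override
    public void processRecords(ProcessRecordsInput processRecordsInput) {
        // Each record may be a KPL-deaggregated user record; see the KPL section above.
        processRecordsInput.records().forEach(r -> System.out.println(r.sequenceNumber()));
    }

    @Override
    public void leaseLost(LeaseLostInput leaseLostInput) {}

    @Override
    public void shardEnded(ShardEndedInput shardEndedInput) {
        try {
            // Checkpointing at shard end is required so child shards can be processed.
            shardEndedInput.checkpointer().checkpoint();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) {}
}
```
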
diff --git a/amazon-kinesis-client-multilang/pom.xml b/amazon-kinesis-client-multilang/pom.xml
index 8ec7c5a7..c972b61c 100644
--- a/amazon-kinesis-client-multilang/pom.xml
+++ b/amazon-kinesis-client-multilang/pom.xml
@@ -21,14 +21,14 @@
   <parent>
     <artifactId>amazon-kinesis-client-pom</artifactId>
     <groupId>software.amazon.kinesis</groupId>
-    <version>${revision}</version>
+    <version>2.6.1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
 
   <artifactId>amazon-kinesis-client-multilang</artifactId>
 
   <properties>
-    <aws-java-sdk.version>1.12.370</aws-java-sdk.version>
+    <aws-java-sdk.version>1.12.668</aws-java-sdk.version>
   </properties>
 
   <dependencies>
@@ -78,7 +78,7 @@
     <dependency>
       <groupId>ch.qos.logback</groupId>
       <artifactId>logback-classic</artifactId>
-      <version>1.3.0</version>
+      <version>1.3.14</version>
     </dependency>
     <dependency>
       <groupId>com.beust</groupId>
@@ -88,7 +88,7 @@
     <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
-      <version>2.11.0</version>
+      <version>2.16.1</version>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
@@ -130,10 +130,9 @@
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-compiler-plugin</artifactId>
-          <version>3.8.1</version>
+          <version>3.13.0</version>
           <configuration>
-            <source>1.8</source>
-            <target>1.8</target>
+            <release>8</release>
             <encoding>UTF-8</encoding>
           </configuration>
         </plugin>
@@ -143,7 +142,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-javadoc-plugin</artifactId>
-        <version>3.3.1</version>
+        <version>3.7.0</version>
        <executions>
           <execution>
             <id>attach-javadocs</id>
@@ -166,7 +165,29 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>com.diffplug.spotless</groupId>
+        <artifactId>spotless-maven-plugin</artifactId>
+        <version>2.30.0</version>
+        <configuration>
+          <java>
+            <palantirJavaFormat/>
+            <importOrder>
+              <order>java,,\#</order>
+            </importOrder>
+          </java>
+        </configuration>
+        <executions>
+          <execution>
+            <goals>
+              <goal>check</goal>
+            </goals>
+            <phase>compile</phase>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
+
   </build>
 
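
> **Editor's note on the spotless plugin above.** The XML element names in this pom hunk were reconstructed from tag-stripped extraction output, so treat the exact configuration (in particular `<palantirJavaFormat/>`) as a best-effort reading; the values (`com.diffplug.spotless`, `spotless-maven-plugin`, `2.30.0`, `java,,\#`, the `check` goal bound to `compile`) are verbatim from the diff. If the `importOrder` reading is right, `java,,\#` means: `java.*` imports first, then all other imports alphabetized in one group, then static imports, with a blank line between groups. The import shuffles in the Java diffs below are this rule being applied, e.g.:

```java
// Order implied by the `java,,\#` importOrder (illustrative file prefix):
import java.io.BufferedReader;   // group 1: java.*
import java.io.IOException;

import com.fasterxml.jackson.databind.ObjectMapper;   // group 2: everything else, alphabetized
import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.multilang.messages.Message;

import static java.util.Objects.requireNonNull;       // group 3: static imports (the \# group)
```
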
diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java
index 54797050..95c82569 100644
--- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java
+++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java
@@ -18,11 +18,9 @@ import java.util.Date;
 import java.util.Optional;
 import java.util.Set;
 
-import org.apache.commons.lang3.Validate;
-
 import com.google.common.collect.ImmutableSet;
-
 import lombok.Getter;
+import org.apache.commons.lang3.Validate;
 import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
 import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
 import software.amazon.kinesis.common.InitialPositionInStream;
@@ -119,14 +117,16 @@ public class KinesisClientLibConfiguration {
     /**
      * Metrics dimensions that always will be enabled regardless of the config provided by user.
      */
-    public static final Set<String> METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet
-            .of(MetricsUtil.OPERATION_DIMENSION_NAME);
+    public static final Set<String> METRICS_ALWAYS_ENABLED_DIMENSIONS =
+            ImmutableSet.of(MetricsUtil.OPERATION_DIMENSION_NAME);
 
     /**
      * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled.
      */
-    public static final Set<String> DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet.<String>builder()
-            .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsUtil.SHARD_ID_DIMENSION_NAME).build();
+    public static final Set<String> DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet.<String>builder()
+            .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS)
+            .add(MetricsUtil.SHARD_ID_DIMENSION_NAME)
+            .build();
 
     /**
      * Metrics dimensions that signify all possible dimensions.
@@ -217,6 +217,9 @@ public class KinesisClientLibConfiguration {
     private AwsCredentialsProvider dynamoDBCredentialsProvider;
     private AwsCredentialsProvider cloudWatchCredentialsProvider;
     private long failoverTimeMillis;
+    private boolean enablePriorityLeaseAssignment;
+    private boolean leaseTableDeletionProtectionEnabled;
+    private boolean leaseTablePitrEnabled;
     private String workerIdentifier;
     private long shardSyncIntervalMillis;
     private int maxRecords;
@@ -284,8 +287,8 @@ public class KinesisClientLibConfiguration {
      * @param workerId
      *            Used to distinguish different workers/processes of a Kinesis application
      */
-    public KinesisClientLibConfiguration(String applicationName, String streamName,
-            AwsCredentialsProvider credentialsProvider, String workerId) {
+    public KinesisClientLibConfiguration(
+            String applicationName, String streamName, AwsCredentialsProvider credentialsProvider, String workerId) {
         this(applicationName, streamName, credentialsProvider, credentialsProvider, credentialsProvider, workerId);
     }
 
@@ -307,16 +310,36 @@ public class KinesisClientLibConfiguration {
      * @param workerId
      *            Used to distinguish different workers/processes of a Kinesis application
      */
-    public KinesisClientLibConfiguration(String applicationName, String streamName,
-            AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider,
-            AwsCredentialsProvider cloudWatchCredentialsProvider, String workerId) {
-        this(applicationName, streamName, null, null, DEFAULT_INITIAL_POSITION_IN_STREAM, kinesisCredentialsProvider,
-                dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, DEFAULT_FAILOVER_TIME_MILLIS, workerId,
-                DEFAULT_MAX_RECORDS, DEFAULT_IDLETIME_BETWEEN_READS_MILLIS,
-                DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS,
-                DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION,
-                DEFAULT_TASK_BACKOFF_TIME_MILLIS, DEFAULT_METRICS_BUFFER_TIME_MILLIS, DEFAULT_METRICS_MAX_QUEUE_SIZE,
-                DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, null, DEFAULT_SHUTDOWN_GRACE_MILLIS,
+    public KinesisClientLibConfiguration(
+            String applicationName,
+            String streamName,
+            AwsCredentialsProvider kinesisCredentialsProvider,
+            AwsCredentialsProvider dynamoDBCredentialsProvider,
+            AwsCredentialsProvider cloudWatchCredentialsProvider,
+            String workerId) {
+        this(
+                applicationName,
+                streamName,
+                null,
+                null,
+                DEFAULT_INITIAL_POSITION_IN_STREAM,
+                kinesisCredentialsProvider,
+                dynamoDBCredentialsProvider,
+                cloudWatchCredentialsProvider,
+                DEFAULT_FAILOVER_TIME_MILLIS,
+                workerId,
+                DEFAULT_MAX_RECORDS,
+                DEFAULT_IDLETIME_BETWEEN_READS_MILLIS,
+                DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST,
+                DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS,
+                DEFAULT_SHARD_SYNC_INTERVAL_MILLIS,
+                DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION,
+                DEFAULT_TASK_BACKOFF_TIME_MILLIS,
+                DEFAULT_METRICS_BUFFER_TIME_MILLIS,
+                DEFAULT_METRICS_MAX_QUEUE_SIZE,
+                DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING,
+                null,
+                DEFAULT_SHUTDOWN_GRACE_MILLIS,
                 DEFAULT_SCHEDULER_INITIALIZATION_BACKOFF_TIME_MILLIS);
     }
 
@@ -376,20 +399,53 @@ public class KinesisClientLibConfiguration {
      */
     // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES
     // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES
-    public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint,
-            InitialPositionInStream initialPositionInStream, AwsCredentialsProvider kinesisCredentialsProvider,
-            AwsCredentialsProvider dynamoDBCredentialsProvider, AwsCredentialsProvider cloudWatchCredentialsProvider,
-            long failoverTimeMillis, String workerId, int maxRecords, long idleTimeBetweenReadsInMillis,
-            boolean callProcessRecordsEvenForEmptyRecordList, long parentShardPollIntervalMillis,
-            long shardSyncIntervalMillis, boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis,
-            long metricsBufferTimeMillis, int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing,
-            String regionName, long shutdownGraceMillis, long schedulerInitializationBackoffTimeMillis) {
-        this(applicationName, streamName, kinesisEndpoint, null, initialPositionInStream, kinesisCredentialsProvider,
-                dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, failoverTimeMillis, workerId, maxRecords,
-                idleTimeBetweenReadsInMillis, callProcessRecordsEvenForEmptyRecordList, parentShardPollIntervalMillis,
-                shardSyncIntervalMillis, cleanupTerminatedShardsBeforeExpiry, taskBackoffTimeMillis,
-                metricsBufferTimeMillis, metricsMaxQueueSize, validateSequenceNumberBeforeCheckpointing, regionName,
-                shutdownGraceMillis, schedulerInitializationBackoffTimeMillis);
+    public KinesisClientLibConfiguration(
+            String applicationName,
+            String streamName,
+            String kinesisEndpoint,
+            InitialPositionInStream initialPositionInStream,
+            AwsCredentialsProvider kinesisCredentialsProvider,
+            AwsCredentialsProvider dynamoDBCredentialsProvider,
+            AwsCredentialsProvider cloudWatchCredentialsProvider,
+            long failoverTimeMillis,
+            String workerId,
+            int maxRecords,
+            long idleTimeBetweenReadsInMillis,
+            boolean callProcessRecordsEvenForEmptyRecordList,
+            long parentShardPollIntervalMillis,
+            long shardSyncIntervalMillis,
+            boolean cleanupTerminatedShardsBeforeExpiry,
+            long taskBackoffTimeMillis,
+            long metricsBufferTimeMillis,
+            int metricsMaxQueueSize,
+            boolean validateSequenceNumberBeforeCheckpointing,
+            String regionName,
+            long shutdownGraceMillis,
+            long schedulerInitializationBackoffTimeMillis) {
+        this(
+                applicationName,
+                streamName,
+                kinesisEndpoint,
+                null,
+                initialPositionInStream,
+                kinesisCredentialsProvider,
+                dynamoDBCredentialsProvider,
+                cloudWatchCredentialsProvider,
+                failoverTimeMillis,
+                workerId,
+                maxRecords,
+                idleTimeBetweenReadsInMillis,
+                callProcessRecordsEvenForEmptyRecordList,
+                parentShardPollIntervalMillis,
+                shardSyncIntervalMillis,
+                cleanupTerminatedShardsBeforeExpiry,
+                taskBackoffTimeMillis,
+                metricsBufferTimeMillis,
+                metricsMaxQueueSize,
+                validateSequenceNumberBeforeCheckpointing,
+                regionName,
+                shutdownGraceMillis,
+                schedulerInitializationBackoffTimeMillis);
     }
 
     /**
@@ -448,15 +504,30 @@ public class KinesisClientLibConfiguration {
      */
     // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES
     // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES
-    public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint,
-            String dynamoDBEndpoint, InitialPositionInStream initialPositionInStream,
-            AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider,
-            AwsCredentialsProvider cloudWatchCredentialsProvider, long failoverTimeMillis, String workerId,
-            int maxRecords, long idleTimeBetweenReadsInMillis, boolean callProcessRecordsEvenForEmptyRecordList,
-            long parentShardPollIntervalMillis, long shardSyncIntervalMillis,
-            boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, long metricsBufferTimeMillis,
-            int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, String regionName,
-            long shutdownGraceMillis, long schedulerInitializationBackoffTimeMillis) {
+    public KinesisClientLibConfiguration(
+            String applicationName,
+            String streamName,
+            String kinesisEndpoint,
+            String dynamoDBEndpoint,
+            InitialPositionInStream initialPositionInStream,
+            AwsCredentialsProvider kinesisCredentialsProvider,
+            AwsCredentialsProvider dynamoDBCredentialsProvider,
+            AwsCredentialsProvider cloudWatchCredentialsProvider,
+            long failoverTimeMillis,
+            String workerId,
+            int maxRecords,
+            long idleTimeBetweenReadsInMillis,
+            boolean callProcessRecordsEvenForEmptyRecordList,
+            long parentShardPollIntervalMillis,
+            long shardSyncIntervalMillis,
+            boolean cleanupTerminatedShardsBeforeExpiry,
+            long taskBackoffTimeMillis,
+            long metricsBufferTimeMillis,
+            int metricsMaxQueueSize,
+            boolean validateSequenceNumberBeforeCheckpointing,
+            String regionName,
+            long shutdownGraceMillis,
+            long schedulerInitializationBackoffTimeMillis) {
         // Check following values are greater than zero
         checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis);
         checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis);
@@ -494,8 +565,8 @@ public class KinesisClientLibConfiguration {
         this.maxLeasesToStealAtOneTime = DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME;
         this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY;
         this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY;
-        this.initialPositionInStreamExtended = InitialPositionInStreamExtended
-                .newInitialPosition(initialPositionInStream);
+        this.initialPositionInStreamExtended =
+                InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream);
         this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST;
         this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION;
         this.recordsFetcherFactory = new SimpleRecordsFetcherFactory();
@@ -558,15 +629,30 @@ public class KinesisClientLibConfiguration {
      */
     // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES
     // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES
-    public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint,
-            String dynamoDBEndpoint, InitialPositionInStream initialPositionInStream,
-            AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider,
-            AwsCredentialsProvider cloudWatchCredentialsProvider, long failoverTimeMillis, String workerId,
-            int maxRecords, long idleTimeBetweenReadsInMillis, boolean callProcessRecordsEvenForEmptyRecordList,
-            long parentShardPollIntervalMillis, long shardSyncIntervalMillis,
-            boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, long metricsBufferTimeMillis,
-            int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, String regionName,
-            RecordsFetcherFactory recordsFetcherFactory, long schedulerInitializationBackoffTimeMillis) {
+    public KinesisClientLibConfiguration(
+            String applicationName,
+            String streamName,
+            String kinesisEndpoint,
+            String dynamoDBEndpoint,
+            InitialPositionInStream initialPositionInStream,
+            AwsCredentialsProvider kinesisCredentialsProvider,
+            AwsCredentialsProvider dynamoDBCredentialsProvider,
+            AwsCredentialsProvider cloudWatchCredentialsProvider,
+            long failoverTimeMillis,
+            String workerId,
+            int maxRecords,
+            long idleTimeBetweenReadsInMillis,
+            boolean callProcessRecordsEvenForEmptyRecordList,
+            long parentShardPollIntervalMillis,
+            long shardSyncIntervalMillis,
+            boolean cleanupTerminatedShardsBeforeExpiry,
+            long taskBackoffTimeMillis,
+            long metricsBufferTimeMillis,
+            int metricsMaxQueueSize,
+            boolean validateSequenceNumberBeforeCheckpointing,
+            String regionName,
+            RecordsFetcherFactory recordsFetcherFactory,
+            long schedulerInitializationBackoffTimeMillis) {
         // Check following values are greater than zero
         checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis);
         checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis);
@@ -606,8 +692,8 @@ public class KinesisClientLibConfiguration {
         this.maxLeasesToStealAtOneTime = DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME;
         this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY;
         this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY;
-        this.initialPositionInStreamExtended = InitialPositionInStreamExtended
-                .newInitialPosition(initialPositionInStream);
+        this.initialPositionInStreamExtended =
+                InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream);
         this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST;
         this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION;
         this.recordsFetcherFactory = recordsFetcherFactory;
@@ -932,8 +1018,8 @@ public class KinesisClientLibConfiguration {
      */
     public KinesisClientLibConfiguration withInitialPositionInStream(InitialPositionInStream initialPositionInStream) {
         this.initialPositionInStream = initialPositionInStream;
-        this.initialPositionInStreamExtended = InitialPositionInStreamExtended
-                .newInitialPosition(initialPositionInStream);
+        this.initialPositionInStreamExtended =
+                InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream);
         return this;
     }
 
@@ -959,6 +1045,11 @@ public class KinesisClientLibConfiguration {
         return this;
     }
 
+    public KinesisClientLibConfiguration withEnablePriorityLeaseAssignment(boolean enablePriorityLeaseAssignment) {
+        this.enablePriorityLeaseAssignment = enablePriorityLeaseAssignment;
+        return this;
+    }
+
     /**
      * @param shardSyncIntervalMillis
      *            Time between tasks to sync leases and Kinesis shards
@@ -977,6 +1068,10 @@ public class KinesisClientLibConfiguration {
      */
     public KinesisClientLibConfiguration withMaxRecords(int maxRecords) {
         checkIsValuePositive("MaxRecords", (long) maxRecords);
+        if (maxRecords > DEFAULT_MAX_RECORDS) {
+            throw new IllegalArgumentException("maxRecords must be less than or equal to " + DEFAULT_MAX_RECORDS
+                    + " but current value is " + maxRecords);
+        }
         this.maxRecords = maxRecords;
         return this;
     }
@@ -1135,8 +1230,10 @@ public class KinesisClientLibConfiguration {
         } else if (metricsEnabledDimensions.contains(MetricsScope.METRICS_DIMENSIONS_ALL)) {
             this.metricsEnabledDimensions = METRICS_DIMENSIONS_ALL;
         } else {
-            this.metricsEnabledDimensions = ImmutableSet.<String>builder().addAll(metricsEnabledDimensions)
-                    .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).build();
+            this.metricsEnabledDimensions = ImmutableSet.<String>builder()
+                    .addAll(metricsEnabledDimensions)
+                    .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS)
+                    .build();
         }
         return this;
     }
@@ -1267,7 +1364,8 @@ public class KinesisClientLibConfiguration {
      * @return this configuration object
      */
     public KinesisClientLibConfiguration withMaxLeaseRenewalThreads(int maxLeaseRenewalThreads) {
-        Validate.isTrue(maxLeaseRenewalThreads > 2,
+        Validate.isTrue(
+                maxLeaseRenewalThreads > 2,
                 "The maximum number of lease renewal threads must be greater than or equal to 2.");
         this.maxLeaseRenewalThreads = maxLeaseRenewalThreads;
 
@@ -1327,7 +1425,8 @@ public class KinesisClientLibConfiguration {
      * @return KinesisClientLibConfiguration
      */
     public KinesisClientLibConfiguration withDataFetchingStrategy(String dataFetchingStrategy) {
-        this.recordsFetcherFactory.dataFetchingStrategy(DataFetchingStrategy.valueOf(dataFetchingStrategy.toUpperCase()));
+        this.recordsFetcherFactory.dataFetchingStrategy(
+                DataFetchingStrategy.valueOf(dataFetchingStrategy.toUpperCase()));
         return this;
     }
 
@@ -1413,7 +1512,8 @@ public class KinesisClientLibConfiguration {
      *            Interval in milliseconds between retrying the scheduler initialization.
      * @return
      */
-    public KinesisClientLibConfiguration withSchedulerInitializationBackoffTimeMillis(long schedulerInitializationBackoffTimeMillis) {
+    public KinesisClientLibConfiguration withSchedulerInitializationBackoffTimeMillis(
+            long schedulerInitializationBackoffTimeMillis) {
         checkIsValuePositive("schedulerInitializationBackoffTimeMillis", schedulerInitializationBackoffTimeMillis);
         this.schedulerInitializationBackoffTimeMillis = schedulerInitializationBackoffTimeMillis;
         return this;
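
> **Editor's note on the configuration changes above.** This file gains three fields (`enablePriorityLeaseAssignment`, `leaseTableDeletionProtectionEnabled`, `leaseTablePitrEnabled`), a fluent `withEnablePriorityLeaseAssignment` setter, and a new upper bound in `withMaxRecords`. A hedged usage sketch of the fluent API follows; the application, stream, and worker names are placeholders, and the 10,000 ceiling is an assumption based on the Kinesis GetRecords per-call limit (the actual bound is whatever `DEFAULT_MAX_RECORDS` is in the released sources).

```java
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;

public class ConfigExample {
    public static void main(String[] args) {
        KinesisClientLibConfiguration config = new KinesisClientLibConfiguration(
                        "my-app", "my-stream", DefaultCredentialsProvider.create(), "worker-1")
                // Values above DEFAULT_MAX_RECORDS now throw IllegalArgumentException
                // instead of being passed through to GetRecords.
                .withMaxRecords(5000)
                // New in this change set: opt in to priority lease assignment.
                .withEnablePriorityLeaseAssignment(true);
    }
}
```
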
diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java
index f175d905..1f67ece5 100644
--- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java
+++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java
@@ -23,8 +23,7 @@ import lombok.extern.slf4j.Slf4j;
  */
 @Slf4j
 class DrainChildSTDERRTask extends LineReaderTask<Boolean> {
-    DrainChildSTDERRTask() {
-    }
+    DrainChildSTDERRTask() {}
 
     @Override
     protected HandleLineResult<Boolean> handleLine(String line) {
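
> **Editor's note on the drain tasks touched below.** These classes exist because a child process that nobody reads will eventually block writing to a full stdout pipe, as the DrainChildSTDOUTTask javadoc below explains. A generic sketch of the pattern, independent of KCL's classes (the `echo` command is a stand-in for the multi-lang child process):

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class DrainExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Process child = new ProcessBuilder("echo", "hello").start();

        // Drain STDOUT on a background thread; a chatty, unread child would
        // otherwise block once the OS pipe buffer fills, and could never exit.
        Future<Boolean> drained = executor.submit(() -> {
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(child.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println("child: " + line); // reprint to our log to permit debugging
                }
                return true;
            }
        });

        System.out.println("drained ok: " + drained.get() + ", exit: " + child.waitFor());
        executor.shutdown();
    }
}
```
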
diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java
index 19208304..573fe570 100644
--- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java
+++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java
@@ -22,23 +22,22 @@ import lombok.extern.slf4j.Slf4j;
  * This class is used to drain the STDOUT of the child process. After the child process has been given a shutdown
  * message and responded indicating that it is shutdown, we attempt to close the input and outputs of that process so
  * that the process can exit.
- * 
+ *
  * To understand why this is necessary, consider the following scenario:
- * 
+ *
  * <ol>
  * <li>Child process responds that it is done with shutdown.</li>
  * <li>Child process prints debugging text to STDOUT that fills the pipe buffer so child becomes blocked.</li>
  * <li>Parent process doesn't drain child process's STDOUT.</li>
  * <li>Child process remains blocked.</li>
  * </ol>
- * 
+ *
  * To prevent the child process from becoming blocked in this way, it is the responsibility of the parent process to
  * drain the child process's STDOUT. We reprint each drained line to our log to permit debugging.
  */
 @Slf4j
 class DrainChildSTDOUTTask extends LineReaderTask<Boolean> {
-    DrainChildSTDOUTTask() {
-    }
+    DrainChildSTDOUTTask() {}
 
     @Override
     protected HandleLineResult<Boolean> handleLine(String line) {
diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java
index 34fe30d3..a9fca27c 100644
--- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java
+++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java
@@ -17,10 +17,9 @@ package software.amazon.kinesis.multilang;
 import java.io.BufferedReader;
 import java.io.IOException;
 
-import software.amazon.kinesis.multilang.messages.Message;
 import com.fasterxml.jackson.databind.ObjectMapper;
-
 import lombok.extern.slf4j.Slf4j;
+import software.amazon.kinesis.multilang.messages.Message;
 
 /**
  * Gets the next message off the STDOUT of the child process. Throws an exception if a message is not found before the
@@ -34,7 +33,7 @@ class GetNextMessageTask extends LineReaderTask<Message> {
 
     /**
      * Constructor.
-     * 
+     *
      * @param objectMapper An object mapper for decoding json messages from the input stream.
      */
     GetNextMessageTask(ObjectMapper objectMapper) {
@@ -43,7 +42,7 @@ class GetNextMessageTask extends LineReaderTask<Message> {
 
     /**
      * Checks if a line is an empty line.
-     * 
+     *
      * @param line A string
      * @return True if the line is an empty string, i.e. "", false otherwise.
      */
@@ -71,8 +70,10 @@ class GetNextMessageTask extends LineReaderTask<Message> {
 
     @Override
     protected Message returnAfterException(Exception e) {
-        throw new RuntimeException("Encountered an error while reading a line from STDIN for shard " + getShardId()
-                + " so won't be able to return a message.", e);
+        throw new RuntimeException(
+                "Encountered an error while reading a line from STDIN for shard " + getShardId()
+                        + " so won't be able to return a message.",
+                e);
     }
 
     @Override
diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java
index 3c01c7b7..915fc088 100644
--- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java
+++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java
@@ -30,7 +30,7 @@ import lombok.extern.slf4j.Slf4j;
  * <ul>
  * <li>{@link #returnAfterEndOfInput()}</li>
  * <li>{@link #returnAfterException(Exception)}</li>
  * </ul>
- * 
+ *
  * @param <T>
  */
 @Slf4j
@@ -41,8 +41,7 @@ abstract class LineReaderTask<T> implements Callable<T> {
     private String shardId;
 
-    LineReaderTask() {
-    }
+    LineReaderTask() {}
 
     /**
      * Reads lines off the input stream until a return value is set, or an exception is encountered, or the end of the
@@ -72,7 +71,7 @@ abstract class LineReaderTask<T> implements Callable<T> {
      * return from the {@link #call()} function by having a value, indicating that value should be returned immediately
      * without reading further, or not having a value, indicating that more lines of input need to be read before
      * returning.
-     * 
+     *
      * @param line A line read from the input stream.
      * @return HandleLineResult which may or may not have a has return value, indicating to return or not return yet
      *         respectively.
@@ -83,7 +82,7 @@ abstract class LineReaderTask<T> implements Callable<T> {
      * This method will be called if there is an error while reading from the input stream. The return value of this
      * method will be returned as the result of this Callable unless an Exception is thrown. If an Exception is thrown
      * then that exception will be thrown by the Callable.
-     * 
+     *
      * @param e An exception that occurred while reading from the input stream.
      * @return What to return.
      */
@@ -93,7 +92,7 @@ abstract class LineReaderTask<T> implements Callable<T> {
      * This method will be called once the end of the input stream is reached. The return value of this method will be
      * returned as the result of this Callable. Implementations of this method are welcome to throw a runtime exception
      * to indicate that the task was unsuccessful.
-     * 
+     *
      * @return What to return.
      */
     protected abstract T returnAfterEndOfInput();
@@ -101,7 +100,7 @@
     /**
      * Allows subclasses to provide more detailed logs. Specifically, this allows the drain tasks and GetNextMessageTask
      * to log which shard they're working on.
-     * 
+     *
      * @return The shard id
      */
     public String getShardId() {
@@ -110,7 +109,7 @@
     /**
      * The description should be a string explaining what this particular LineReader class does.
-     * 
+     *
      * @return The description.
      */
     public String getDescription() {
@@ -121,7 +120,7 @@
      * The result of a call to {@link LineReaderTask#handleLine(String)}. Allows implementations of that method to
      * indicate whether a particular invocation of that method produced a return for this task or not. If a return value
      * doesn't exist the {@link #call()} method will continue to the next line.
-     * 
+     *
      * @param <V>
      */
    protected class HandleLineResult<V> {
@@ -158,7 +157,7 @@
      * {@link MultiLangShardRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are
      * set inside this method instead of the constructor so that this object will be initialized when all its attributes
      * are known to the record processor.
-     * 
+     *
      * @param stream
      * @param shardId
      * @param description
@@ -180,5 +179,4 @@
         this.description = description;
         return this;
     }
-
 }
diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java
index a649490c..a8d5f64f 100644
--- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java
+++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java
@@ -20,19 +20,19 @@ import java.io.InputStreamReader;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 
-import software.amazon.kinesis.multilang.messages.Message;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import software.amazon.kinesis.multilang.messages.Message;
 
 /**
  * Provides methods for interacting with the child process's STDOUT.
- * 
+ *
  * {@link #getNextMessageFromSTDOUT()} reads lines from the child process's STDOUT and attempts to decode a
  * {@link Message} object from each line. A child process's STDOUT could have lines that don't contain data related to
  * the multi-language protocol, such as when the child process prints debugging information to its STDOUT (instead of
  * logging to a file), also when a child processes writes a Message it is expected to prepend and append a new line
  * character to their message to help ensure that it is isolated on a line all by itself which results in empty lines
  * being present in STDOUT. Lines which cannot be decoded to a Message object are ignored.
- * 
+ *
  * {@link #drainSTDOUT()} simply reads all data from the child process's STDOUT until the stream is closed.
  */
 class MessageReader {
@@ -48,19 +48,18 @@ class MessageReader {
     /**
      * Use the initialize methods after construction.
      */
-    MessageReader() {
-    }
+    MessageReader() {}
 
     /**
      * Returns a future which represents an attempt to read the next message in the child process's STDOUT. If the task
      * is successful, the result of the future will be the next message found in the child process's STDOUT, if the task
      * is unable to find a message before the child process's STDOUT is closed, or reading from STDOUT causes an
      * IOException, then an execution exception will be generated by this future.
-     * 
+     *
      * The task employed by this method reads from the child process's STDOUT line by line. The task attempts to decode
      * each line into a {@link Message} object. Lines that fail to decode to a Message are ignored and the task
      * continues to the next line until it finds a Message.
-     * 
+     *
      * @return
      */
     Future<Message> getNextMessageFromSTDOUT() {
@@ -73,7 +72,7 @@ class MessageReader {
      * Returns a future that represents a computation that drains the STDOUT of the child process. That future's result
      * is true if the end of the child's STDOUT is reached, its result is false if there was an error while reading from
      * the stream. This task will log all the lines it drains to permit debugging.
-     * 
+     *
      * @return
     */
     Future<Boolean> drainSTDOUT() {
@@ -89,19 +88,16 @@ class MessageReader {
      * {@link MultiLangShardRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are
      * set inside this method instead of the constructor so that this object will be initialized when all its attributes
      * are known to the record processor.
-     * 
+     *
      * @param stream Used to read messages from the subprocess.
      * @param shardId The shard we're working on.
* @param objectMapper The object mapper to decode messages. * @param executorService An executor service to run tasks in. */ - MessageReader initialize(InputStream stream, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { - return this.initialize(new BufferedReader(new InputStreamReader(stream)), shardId, objectMapper, - executorService); - + MessageReader initialize( + InputStream stream, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { + return this.initialize( + new BufferedReader(new InputStreamReader(stream)), shardId, objectMapper, executorService); } /** @@ -110,10 +106,8 @@ class MessageReader { * @param objectMapper The object mapper to decode messages. * @param executorService An executor service to run tasks in. */ - MessageReader initialize(BufferedReader reader, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { + MessageReader initialize( + BufferedReader reader, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { this.reader = reader; this.shardId = shardId; this.objectMapper = objectMapper; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java index 371c044b..c50c2004 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java @@ -23,7 +23,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; @@ -55,13 +54,12 @@ class MessageWriter { /** * Use initialize method after construction. */ - MessageWriter() { - } + MessageWriter() {} /** * Writes the message then writes the line separator provided by the system. Flushes each message to guarantee it * is delivered as soon as possible to the subprocess. - * + * * @param message A message to be written to the subprocess. * @return * @throws IOException @@ -76,7 +74,10 @@ class MessageWriter { */ synchronized (writer) { writer.write(message, 0, message.length()); - writer.write(System.lineSeparator(), 0, System.lineSeparator().length()); + writer.write( + System.lineSeparator(), + 0, + System.lineSeparator().length()); writer.flush(); } log.info("Message size == {} bytes for shard {}", message.getBytes().length, shardId); @@ -98,7 +99,7 @@ class MessageWriter { /** * Converts the message to a JSON string and writes it to the subprocess. - * + * * @param message A message to be written to the subprocess. * @return */ @@ -108,9 +109,9 @@ class MessageWriter { String jsonText = objectMapper.writeValueAsString(message); return writeMessageToOutput(jsonText); } catch (IOException e) { - String errorMessage = - String.format("Encountered I/O error while writing %s action to subprocess", message.getClass() - .getSimpleName()); + String errorMessage = String.format( + "Encountered I/O error while writing %s action to subprocess", + message.getClass().getSimpleName()); log.error(errorMessage, e); throw new RuntimeException(errorMessage, e); } @@ -118,7 +119,7 @@ class MessageWriter { /** * Writes an {@link InitializeMessage} to the subprocess. 
- * + * * @param initializationInput * contains information about the shard being initialized */ @@ -128,7 +129,7 @@ class MessageWriter { /** * Writes a {@link ProcessRecordsMessage} message to the subprocess. - * + * * @param processRecordsInput * the records, and associated metadata to be processed. */ @@ -138,7 +139,7 @@ class MessageWriter { /** * Writes the lease lost message to the sub process. - * + * * @param leaseLostInput * the lease lost input. This is currently unused as lease loss doesn't actually have anything in it * @return A future that is set when the message has been written. @@ -149,7 +150,7 @@ class MessageWriter { /** * Writes a message to the sub process indicating that the shard has ended - * + * * @param shardEndedInput * the shard end input. This is currently unused as the checkpoint is extracted, and used by the caller. * @return A future that is set when the message has been written. @@ -167,7 +168,7 @@ class MessageWriter { /** * Writes a {@link CheckpointMessage} to the subprocess. - * + * * @param sequenceNumber * The sequence number that was checkpointed. * @param subSequenceNumber @@ -175,14 +176,14 @@ class MessageWriter { * @param throwable * The exception that was thrown by a checkpoint attempt. Null if one didn't occur. */ - Future writeCheckpointMessageWithError(String sequenceNumber, Long subSequenceNumber, - Throwable throwable) { + Future writeCheckpointMessageWithError( + String sequenceNumber, Long subSequenceNumber, Throwable throwable) { return writeMessage(new CheckpointMessage(sequenceNumber, subSequenceNumber, throwable)); } /** * Closes the output stream and prevents further attempts to write. - * + * * @throws IOException Thrown when closing the writer fails */ void close() throws IOException { @@ -201,18 +202,16 @@ class MessageWriter { * {@link MultiLangShardRecordProcessor (String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. - * + * * @param stream Used to write messages to the subprocess. * @param shardId The shard we're working on. * @param objectMapper The object mapper to encode messages. * @param executorService An executor service to run tasks in. */ - MessageWriter initialize(OutputStream stream, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { - return this.initialize(new BufferedWriter(new OutputStreamWriter(stream)), shardId, objectMapper, - executorService); + MessageWriter initialize( + OutputStream stream, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { + return this.initialize( + new BufferedWriter(new OutputStreamWriter(stream)), shardId, objectMapper, executorService); } /** @@ -221,15 +220,12 @@ class MessageWriter { * @param objectMapper The object mapper to encode messages. * @param executorService An executor service to run tasks in. 
*/ - MessageWriter initialize(BufferedWriter writer, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { + MessageWriter initialize( + BufferedWriter writer, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { this.writer = writer; this.shardId = shardId; this.objectMapper = objectMapper; this.executorService = executorService; return this; } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java index b056e21f..4588b246 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java @@ -26,20 +26,18 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import ch.qos.logback.classic.LoggerContext; +import ch.qos.logback.classic.joran.JoranConfigurator; +import ch.qos.logback.core.joran.spi.JoranException; +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; +import lombok.Data; +import lombok.experimental.Accessors; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.LoggerFactory; - -import com.beust.jcommander.JCommander; -import com.beust.jcommander.Parameter; - -import ch.qos.logback.classic.LoggerContext; -import ch.qos.logback.classic.joran.JoranConfigurator; -import ch.qos.logback.core.joran.spi.JoranException; -import lombok.Data; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.coordinator.Scheduler; /** @@ -75,11 +73,14 @@ public class MultiLangDaemon { @Parameter List parameters = new ArrayList<>(); - @Parameter(names = { "-p", "--properties-file" }, description = "Properties file to be used with the KCL") + @Parameter( + names = {"-p", "--properties-file"}, + description = "Properties file to be used with the KCL") String propertiesFile; - @Parameter(names = { "-l", - "--log-configuration" }, description = "File location of logback.xml to be override the default") + @Parameter( + names = {"-l", "--log-configuration"}, + description = "File location of logback.xml to override the default") String logConfiguration; } @@ -102,7 +103,8 @@ public class MultiLangDaemon { } JCommander buildJCommanderAndParseArgs(final MultiLangDaemonArguments arguments, final String[] args) { - JCommander jCommander = JCommander.newBuilder().programName("amazon-kinesis-client MultiLangDaemon") + JCommander jCommander = JCommander.newBuilder() + .programName("amazon-kinesis-client MultiLangDaemon") .addObject(arguments) .build(); jCommander.parse(args); @@ -128,8 +130,8 @@ public class MultiLangDaemon { } } - void configureLogging(final String logConfiguration, final LoggerContext loggerContext, - final JoranConfigurator configurator) { + void configureLogging( + final String logConfiguration, final LoggerContext loggerContext, final JoranConfigurator configurator) { loggerContext.reset(); try (InputStream inputStream = FileUtils.openInputStream(new File(logConfiguration))) { configurator.setContext(loggerContext); @@ -146,9 +148,8 @@ public class MultiLangDaemon { if (arguments.parameters.size() == 1) { propertiesFile = 
arguments.parameters.get(0); } else { - throw new RuntimeException( - "Expected a single argument, but found multiple arguments. Arguments: " - + String.join(", ", arguments.parameters)); + throw new RuntimeException("Expected a single argument, but found multiple arguments. Arguments: " + + String.join(", ", arguments.parameters)); } } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java index 4d3a408f..aaba66f0 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java @@ -26,10 +26,9 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; import software.amazon.kinesis.retrieval.RetrievalConfig; @@ -45,15 +44,15 @@ public class MultiLangDaemonConfig { private static final String PROP_PROCESSING_LANGUAGE = "processingLanguage"; private static final String PROP_MAX_ACTIVE_THREADS = "maxActiveThreads"; - private MultiLangDaemonConfiguration multiLangDaemonConfiguration; + private final MultiLangDaemonConfiguration multiLangDaemonConfiguration; - private ExecutorService executorService; + private final ExecutorService executorService; - private MultiLangRecordProcessorFactory recordProcessorFactory; + private final MultiLangRecordProcessorFactory recordProcessorFactory; /** * Constructor. - * + * * @param propertiesFile * The location of the properties file. * @throws IOException @@ -66,7 +65,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @param propertiesFile * The location of the properties file. * @param classLoader @@ -82,7 +81,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @param propertiesFile * The location of the properties file. * @param classLoader @@ -94,8 +93,9 @@ public class MultiLangDaemonConfig { * @throws IllegalArgumentException * Thrown when the contents of the properties file are not as expected. 
*/ - public MultiLangDaemonConfig(String propertiesFile, ClassLoader classLoader, - KinesisClientLibConfigurator configurator) throws IOException, IllegalArgumentException { + public MultiLangDaemonConfig( + String propertiesFile, ClassLoader classLoader, KinesisClientLibConfigurator configurator) + throws IOException, IllegalArgumentException { Properties properties = loadProperties(classLoader, propertiesFile); if (!validateProperties(properties)) { throw new IllegalArgumentException( @@ -107,11 +107,14 @@ public class MultiLangDaemonConfig { multiLangDaemonConfiguration = configurator.getConfiguration(properties); executorService = buildExecutorService(properties); - recordProcessorFactory = new MultiLangRecordProcessorFactory(executableName, executorService, - multiLangDaemonConfiguration); + recordProcessorFactory = + new MultiLangRecordProcessorFactory(executableName, executorService, multiLangDaemonConfiguration); - log.info("Running {} to process stream {} with executable {}", multiLangDaemonConfiguration.getApplicationName(), - multiLangDaemonConfiguration.getStreamName(), executableName); + log.info( + "Running {} to process stream {} with executable {}", + multiLangDaemonConfiguration.getApplicationName(), + multiLangDaemonConfiguration.getStreamName(), + executableName); prepare(processingLanguage); } @@ -138,7 +141,7 @@ public class MultiLangDaemonConfig { } log.info("MultiLangDaemon is adding the following fields to the User Agent: {}", userAgent.toString()); -// multiLangDaemonConfiguration.withUserAgent(userAgent.toString()); + // multiLangDaemonConfiguration.withUserAgent(userAgent.toString()); } private static Properties loadProperties(ClassLoader classLoader, String propertiesFileName) throws IOException { @@ -165,7 +168,6 @@ public class MultiLangDaemonConfig { propertyStream.close(); } } - } private static boolean validateProperties(Properties properties) { @@ -182,17 +184,22 @@ public class MultiLangDaemonConfig { log.debug("Value for {} property is {}", PROP_MAX_ACTIVE_THREADS, maxActiveThreads); if (maxActiveThreads <= 0) { log.info("Using a cached thread pool."); - return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue(), - builder.build()); + return new ThreadPoolExecutor( + 0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(), builder.build()); } else { log.info("Using a fixed thread pool with {} max active threads.", maxActiveThreads); - return new ThreadPoolExecutor(maxActiveThreads, maxActiveThreads, 0L, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), builder.build()); + return new ThreadPoolExecutor( + maxActiveThreads, + maxActiveThreads, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(), + builder.build()); } } /** - * + * * @return A KinesisClientLibConfiguration object based on the properties file provided. */ public MultiLangDaemonConfiguration getMultiLangDaemonConfiguration() { @@ -200,7 +207,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @return An executor service based on the properties file provided. */ public ExecutorService getExecutorService() { @@ -208,7 +215,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @return A MultiLangRecordProcessorFactory based on the properties file provided. 
*/ public MultiLangRecordProcessorFactory getRecordProcessorFactory() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java index 66a6ae9a..46ede873 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java @@ -61,8 +61,11 @@ class MultiLangProtocol { * @param initializationInput * information about the shard this processor is starting to process */ - MultiLangProtocol(MessageReader messageReader, MessageWriter messageWriter, - InitializationInput initializationInput, MultiLangDaemonConfiguration configuration) { + MultiLangProtocol( + MessageReader messageReader, + MessageWriter messageWriter, + InitializationInput initializationInput, + MultiLangDaemonConfiguration configuration) { this.messageReader = messageReader; this.messageWriter = messageWriter; this.initializationInput = initializationInput; @@ -82,7 +85,6 @@ class MultiLangProtocol { */ Future writeFuture = messageWriter.writeInitializeMessage(initializationInput); return waitForStatusMessage(InitializeMessage.ACTION, null, writeFuture); - } /** @@ -100,7 +102,7 @@ class MultiLangProtocol { /** * Notifies the client process that the lease has been lost, and it needs to shutdown. - * + * * @param leaseLostInput * the lease lost input that is passed to the {@link MessageWriter} * @return true if the message was successfully writtem @@ -115,7 +117,9 @@ class MultiLangProtocol { * @return */ boolean shardEnded(ShardEndedInput shardEndedInput) { - return waitForStatusMessage(ShardEndedMessage.ACTION, shardEndedInput.checkpointer(), + return waitForStatusMessage( + ShardEndedMessage.ACTION, + shardEndedInput.checkpointer(), messageWriter.writeShardEndedMessage(shardEndedInput)); } @@ -147,8 +151,8 @@ class MultiLangProtocol { * The writing task. * @return Whether or not this operation succeeded. */ - private boolean waitForStatusMessage(String action, RecordProcessorCheckpointer checkpointer, - Future writeFuture) { + private boolean waitForStatusMessage( + String action, RecordProcessorCheckpointer checkpointer, Future writeFuture) { boolean statusWasCorrect = waitForStatusMessage(action, checkpointer); // Examine whether or not we failed somewhere along the line. @@ -194,7 +198,7 @@ class MultiLangProtocol { return false; } - statusMessage = message.filter(m -> m instanceof StatusMessage).map(m -> (StatusMessage) m ); + statusMessage = message.filter(m -> m instanceof StatusMessage).map(m -> (StatusMessage) m); } return this.validateStatusMessage(statusMessage.get(), action); } @@ -207,13 +211,17 @@ class MultiLangProtocol { try { return Optional.of(fm.get()); } catch (InterruptedException e) { - log.error("Interrupted while waiting for {} message for shard {}", action, - initializationInput.shardId(), e); + log.error( + "Interrupted while waiting for {} message for shard {}", action, initializationInput.shardId(), e); } catch (ExecutionException e) { - log.error("Failed to get status message for {} action for shard {}", action, - initializationInput.shardId(), e); + log.error( + "Failed to get status message for {} action for shard {}", + action, + initializationInput.shardId(), + e); } catch (TimeoutException e) { - log.error("Timedout to get status message for {} action for shard {}. 
Terminating...", + log.error( + "Timedout to get status message for {} action for shard {}. Terminating...", action, initializationInput.shardId(), e); @@ -240,11 +248,14 @@ class MultiLangProtocol { * @return Whether or not this operation succeeded. */ private boolean validateStatusMessage(StatusMessage statusMessage, String action) { - log.info("Received response {} from subprocess while waiting for {}" - + " while processing shard {}", statusMessage, action, initializationInput.shardId()); - return !(statusMessage == null || statusMessage.getResponseFor() == null || !statusMessage.getResponseFor() - .equals(action)); - + log.info( + "Received response {} from subprocess while waiting for {}" + " while processing shard {}", + statusMessage, + action, + initializationInput.shardId()); + return !(statusMessage == null + || statusMessage.getResponseFor() == null + || !statusMessage.getResponseFor().equals(action)); } /** @@ -274,13 +285,12 @@ class MultiLangProtocol { } return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, null); } else { - String message = - String.format("Was asked to checkpoint at %s but no checkpointer was provided for shard %s", - sequenceNumber, initializationInput.shardId()); + String message = String.format( + "Was asked to checkpoint at %s but no checkpointer was provided for shard %s", + sequenceNumber, initializationInput.shardId()); log.error(message); - return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, - new InvalidStateException( - message)); + return this.messageWriter.writeCheckpointMessageWithError( + sequenceNumber, subSequenceNumber, new InvalidStateException(message)); } } catch (Throwable t) { return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, t); @@ -288,8 +298,8 @@ class MultiLangProtocol { } private String logCheckpointMessage(String sequenceNumber, Long subSequenceNumber) { - return String.format("Attempting to checkpoint shard %s @ sequence number %s, and sub sequence number %s", + return String.format( + "Attempting to checkpoint shard %s @ sequence number %s, and sub sequence number %s", initializationInput.shardId(), sequenceNumber, subSequenceNumber); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java index a1c01c51..56c555d7 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java @@ -17,12 +17,10 @@ package software.amazon.kinesis.multilang; import java.util.concurrent.ExecutorService; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; /** * Creates {@link MultiLangShardRecordProcessor}'s. 
@@ -44,8 +42,8 @@ public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFact * @param command The command that will do processing for this factory's record processors. * @param executorService An executor service to use while processing inputs and outputs of the child process. */ - public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, - MultiLangDaemonConfiguration configuration) { + public MultiLangRecordProcessorFactory( + String command, ExecutorService executorService, MultiLangDaemonConfiguration configuration) { this(command, executorService, new ObjectMapper(), configuration); } @@ -54,8 +52,11 @@ public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFact * @param executorService An executor service to use while processing inputs and outputs of the child process. * @param objectMapper An object mapper used to convert messages to json to be written to the child process */ - public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, ObjectMapper objectMapper, - MultiLangDaemonConfiguration configuration) { + public MultiLangRecordProcessorFactory( + String command, + ExecutorService executorService, + ObjectMapper objectMapper, + MultiLangDaemonConfiguration configuration) { this.command = command; this.commandArray = command.split(COMMAND_DELIMETER_REGEX); this.executorService = executorService; @@ -69,8 +70,8 @@ public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFact /* * Giving ProcessBuilder the command as an array of Strings allows users to specify command line arguments. */ - return new MultiLangShardRecordProcessor(new ProcessBuilder(commandArray), executorService, this.objectMapper, - this.configuration); + return new MultiLangShardRecordProcessor( + new ProcessBuilder(commandArray), executorService, this.objectMapper, this.configuration); } String[] getCommandArray() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java index 7b0eefe2..c6569cb9 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java @@ -22,7 +22,6 @@ import java.util.concurrent.Future; import java.util.function.Function; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; @@ -32,7 +31,6 @@ import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; import software.amazon.kinesis.processor.ShardRecordProcessor; - /** * A record processor that manages creating a child process that implements the multi language protocol and connecting * that child process's input and outputs to a {@link MultiLangProtocol} object and calling the appropriate methods on @@ -50,20 +48,20 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { private Future stderrReadTask; - private MessageWriter messageWriter; - private MessageReader messageReader; - private DrainChildSTDERRTask readSTDERRTask; + private final MessageWriter messageWriter; + 
private final MessageReader messageReader; + private final DrainChildSTDERRTask readSTDERRTask; - private ProcessBuilder processBuilder; + private final ProcessBuilder processBuilder; private Process process; - private ExecutorService executorService; + private final ExecutorService executorService; private ProcessState state; - private ObjectMapper objectMapper; + private final ObjectMapper objectMapper; private MultiLangProtocol protocol; - private MultiLangDaemonConfiguration configuration; + private final MultiLangDaemonConfiguration configuration; @Override public void initialize(InitializationInput initializationInput) { @@ -157,8 +155,10 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { if (ProcessState.ACTIVE.equals(this.state)) { stopProcessing("Encountered an error while trying to shutdown child process", t); } else { - stopProcessing("Encountered an error during shutdown," - + " but it appears the processor has already been shutdown", t); + stopProcessing( + "Encountered an error during shutdown," + + " but it appears the processor has already been shutdown", + t); } } } @@ -167,12 +167,13 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * Used to tell whether the processor has been shutdown already. */ private enum ProcessState { - ACTIVE, SHUTDOWN + ACTIVE, + SHUTDOWN } /** * Constructor. - * + * * @param processBuilder * Provides process builder functionality. * @param executorService @@ -180,15 +181,24 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * @param objectMapper * An obejct mapper. */ - MultiLangShardRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, - ObjectMapper objectMapper, MultiLangDaemonConfiguration configuration) { - this(processBuilder, executorService, objectMapper, new MessageWriter(), new MessageReader(), - new DrainChildSTDERRTask(), configuration); + MultiLangShardRecordProcessor( + ProcessBuilder processBuilder, + ExecutorService executorService, + ObjectMapper objectMapper, + MultiLangDaemonConfiguration configuration) { + this( + processBuilder, + executorService, + objectMapper, + new MessageWriter(), + new MessageReader(), + new DrainChildSTDERRTask(), + configuration); } /** * Note: This constructor has package level access solely for testing purposes. 
- * + * * @param processBuilder * Provides the child process for this record processor * @param executorService @@ -202,9 +212,14 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * @param readSTDERRTask * Error reader to read from child process's stderr */ - MultiLangShardRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, ObjectMapper objectMapper, - MessageWriter messageWriter, MessageReader messageReader, DrainChildSTDERRTask readSTDERRTask, - MultiLangDaemonConfiguration configuration) { + MultiLangShardRecordProcessor( + ProcessBuilder processBuilder, + ExecutorService executorService, + ObjectMapper objectMapper, + MessageWriter messageWriter, + MessageReader messageReader, + DrainChildSTDERRTask readSTDERRTask, + MultiLangDaemonConfiguration configuration) { this.executorService = executorService; this.processBuilder = processBuilder; this.objectMapper = objectMapper; @@ -213,7 +228,6 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { this.readSTDERRTask = readSTDERRTask; this.configuration = configuration; - this.state = ProcessState.ACTIVE; } @@ -270,7 +284,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * Convenience method used by {@link #childProcessShutdownSequence()} to drain the STDIN and STDERR of the child * process. - * + * * @param future A future to wait on. * @param whatThisFutureIsDoing What that future is doing while we wait. */ @@ -285,7 +299,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * Convenience method for logging and safely shutting down so that we don't throw an exception up to the KCL on * accident. - * + * * @param message The reason we are stopping processing. * @param reason An exception that caused us to want to stop processing. */ @@ -303,8 +317,6 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * We provide a package level method for unit testing this call to exit. - * - * @param val exit value */ void exit() { System.exit(EXIT_VALUE); @@ -313,7 +325,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * The {@link ProcessBuilder} class is final so not easily mocked. We wrap the only interaction we have with it in * this package level method to permit unit testing. - * + * * @return The process started by processBuilder * @throws IOException If the process can't be started. */ diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java new file mode 100644 index 00000000..19211822 --- /dev/null +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java @@ -0,0 +1,142 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package software.amazon.kinesis.multilang; + +import java.util.HashMap; +import java.util.Map; + +import com.amazonaws.regions.Regions; +import com.google.common.base.CaseFormat; +import lombok.AccessLevel; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +/** + * Key-Value pairs which may be nested in, and extracted from, a property value + * in a Java properties file. For example, given the line in a property file of + * {@code my_key = my_value|foo=bar} and a delimiter split on {@code |} (pipe), + * the value {@code my_value|foo=bar} would have a nested key of {@code foo} + * and its corresponding value is {@code bar}. + * <p>
    + * The order of nested properties does not matter, and these properties are optional. + * Customers may choose to provide, in any order, zero-or-more nested properties. + * <p>
    + * Duplicate keys are not supported, and may result in a last-write-wins outcome. + */ +@Slf4j +public enum NestedPropertyKey { + + /** + * Specify the service endpoint where requests will be submitted. + * This property's value must be in the following format: + * <pre>
    +     *     ENDPOINT ::= SERVICE_ENDPOINT "^" SIGNING_REGION
    +     *     SERVICE_ENDPOINT ::= URL
    +     *     SIGNING_REGION ::= AWS_REGION
    +     * </pre>
    + * + * It would be redundant to provide both this and {@link #ENDPOINT_REGION}. + * + * @see #ENDPOINT_REGION + * @see AWS Service endpoints + * @see Available Regions + */ + ENDPOINT { + void visit(final NestedPropertyProcessor processor, final String endpoint) { + final String[] tokens = endpoint.split("\\^"); + if (tokens.length != 2) { + throw new IllegalArgumentException("Invalid " + name() + ": " + endpoint); + } + processor.acceptEndpoint(tokens[0], tokens[1]); + } + }, + + /** + * Specify the region where service requests will be submitted. This + * region will determine both the service endpoint and signing region. + * <p>
    + * It would be redundant to provide both this and {@link #ENDPOINT}. + * + * @see #ENDPOINT + * @see Available Regions + */ + ENDPOINT_REGION { + void visit(final NestedPropertyProcessor processor, final String region) { + processor.acceptEndpointRegion(Regions.fromName(region)); + } + }, + + /** + * External ids may be used when delegating access in a multi-tenant + * environment, or to third parties. + * + * @see + * How to use an external ID when granting access to your AWS resources to a third party + */ + EXTERNAL_ID { + void visit(final NestedPropertyProcessor processor, final String externalId) { + processor.acceptExternalId(externalId); + } + }, + ; + + /** + * Nested key within the property value. For example, a nested key-value + * of {@code foo=bar} has a nested key of {@code foo}. + */ + @Getter(AccessLevel.PACKAGE) + private final String nestedKey; + + NestedPropertyKey() { + // convert the enum from UPPER_SNAKE_CASE to lowerCamelCase + nestedKey = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name()); + } + + abstract void visit(NestedPropertyProcessor processor, String value); + + /** + * Parses any number of parameters. Each nested property will prompt a + * visit to the {@code processor}. + * + * @param processor processor to be invoked for every nested property + * @param params parameters to check for a nested property key + */ + public static void parse(final NestedPropertyProcessor processor, final String... params) { + // Construct a disposable cache to keep this O(n). Since parsing is + // usually one-and-done, it's wasteful to maintain this cache in perpetuity. + final Map cachedKeys = new HashMap<>(); + for (final NestedPropertyKey npk : values()) { + cachedKeys.put(npk.getNestedKey(), npk); + } + + for (final String param : params) { + if (param != null) { + final String[] tokens = param.split("="); + if (tokens.length == 2) { + final NestedPropertyKey npk = cachedKeys.get(tokens[0]); + if (npk != null) { + npk.visit(processor, tokens[1]); + } else { + log.warn("Unsupported nested key: {}", param); + } + } else if (tokens.length > 2) { + log.warn("Malformed nested key: {}", param); + } else { + log.info("Parameter is not a nested key: {}", param); + } + } + } + } +} diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java new file mode 100644 index 00000000..f7587297 --- /dev/null +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java @@ -0,0 +1,53 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package software.amazon.kinesis.multilang; + +import com.amazonaws.regions.Regions; + +/** + * Defines methods to process {@link NestedPropertyKey}s. 
+ */ +public interface NestedPropertyProcessor { + + /** + * Set the service endpoint where requests are sent. + * + * @param serviceEndpoint the service endpoint either with or without the protocol + * (e.g., https://sns.us-west-1.amazonaws.com, sns.us-west-1.amazonaws.com) + * @param signingRegion the region to use for SigV4 signing of requests (e.g. us-west-1) + * + * @see #acceptEndpointRegion(Regions) + * @see + * AwsClientBuilder.EndpointConfiguration + */ + void acceptEndpoint(String serviceEndpoint, String signingRegion); + + /** + * Set the service endpoint where requests are sent. + * + * @param region Region to be used by the client. This will be used to determine both the service endpoint + * (e.g., https://sns.us-west-1.amazonaws.com) and signing region (e.g., us-west-1) for requests. + * + * @see #acceptEndpoint(String, String) + */ + void acceptEndpointRegion(Regions region); + + /** + * Set the external id, an optional field to designate who can assume an IAM role. + * + * @param externalId external id used in the service call used to retrieve session credentials + */ + void acceptExternalId(String externalId); +} diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java new file mode 100644 index 00000000..b5b9f924 --- /dev/null +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java @@ -0,0 +1,86 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package software.amazon.kinesis.multilang.auth; + +import java.util.Arrays; + +import com.amazonaws.auth.AWSSessionCredentials; +import com.amazonaws.auth.AWSSessionCredentialsProvider; +import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider; +import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.Builder; +import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; +import com.amazonaws.regions.Regions; +import com.amazonaws.services.securitytoken.AWSSecurityTokenService; +import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient; +import software.amazon.kinesis.multilang.NestedPropertyKey; +import software.amazon.kinesis.multilang.NestedPropertyProcessor; + +/** + * An {@link AWSSessionCredentialsProvider} that is backed by STSAssumeRole. 
+ */ +public class KclSTSAssumeRoleSessionCredentialsProvider + implements AWSSessionCredentialsProvider, NestedPropertyProcessor { + + private final Builder builder; + + private final STSAssumeRoleSessionCredentialsProvider provider; + + /** + * + * @param params vararg parameters which must include roleArn at index=0, + * and roleSessionName at index=1 + */ + public KclSTSAssumeRoleSessionCredentialsProvider(final String[] params) { + this(params[0], params[1], Arrays.copyOfRange(params, 2, params.length)); + } + + public KclSTSAssumeRoleSessionCredentialsProvider( + final String roleArn, final String roleSessionName, final String... params) { + builder = new Builder(roleArn, roleSessionName); + NestedPropertyKey.parse(this, params); + provider = builder.build(); + } + + @Override + public AWSSessionCredentials getCredentials() { + return provider.getCredentials(); + } + + @Override + public void refresh() { + // do nothing + } + + @Override + public void acceptEndpoint(final String serviceEndpoint, final String signingRegion) { + final EndpointConfiguration endpoint = new EndpointConfiguration(serviceEndpoint, signingRegion); + final AWSSecurityTokenService stsClient = AWSSecurityTokenServiceClient.builder() + .withEndpointConfiguration(endpoint) + .build(); + builder.withStsClient(stsClient); + } + + @Override + public void acceptEndpointRegion(final Regions region) { + final AWSSecurityTokenService stsClient = + AWSSecurityTokenServiceClient.builder().withRegion(region).build(); + builder.withStsClient(stsClient); + } + + @Override + public void acceptExternalId(final String externalId) { + builder.withExternalId(externalId); + } +} diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java index da3db4fb..8110d4f7 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java @@ -14,31 +14,30 @@ */ package software.amazon.kinesis.multilang.config; -import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSCredentialsProviderChain; import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; /** * Get AWSCredentialsProvider property. */ @Slf4j class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder { - private static final String AUTH_PREFIX = "com.amazonaws.auth."; private static final String LIST_DELIMITER = ","; private static final String ARG_DELIMITER = "|"; /** * Constructor. */ - AWSCredentialsProviderPropertyValueDecoder() { - } + AWSCredentialsProviderPropertyValueDecoder() {} /** * Get AWSCredentialsProvider property. 
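Taken together, the decoder below and the KclSTSAssumeRoleSessionCredentialsProvider above define how a multi-lang properties value becomes a credentials provider: the value is split on "," into provider entries, each entry is split on "|" into a class name plus String arguments, and the class name is resolved against a list of package prefixes before a matching constructor is invoked. A hedged sketch of the equivalent direct construction (the ARN, session name, and nested key values are placeholders, not working credentials):

```java
import software.amazon.kinesis.multilang.auth.KclSTSAssumeRoleSessionCredentialsProvider;

public class ProviderExample {
    public static void main(String[] args) {
        // Roughly equivalent to a properties value such as:
        //   AWSCredentialsProvider = KclSTSAssumeRoleSessionCredentialsProvider|<roleArn>|<sessionName>|externalId=<id>
        // Index 0 is roleArn, index 1 is roleSessionName; trailing entries are
        // optional nested key-value pairs handled by NestedPropertyKey.parse.
        KclSTSAssumeRoleSessionCredentialsProvider provider =
                new KclSTSAssumeRoleSessionCredentialsProvider(new String[] {
                    "arn:aws:iam::123456789012:role/ExampleRole", // placeholder roleArn
                    "example-session",                            // placeholder roleSessionName
                    "externalId=example-id",                      // EXTERNAL_ID nested key
                    "endpointRegion=us-west-2"                    // ENDPOINT_REGION nested key
                });
        // provider.getCredentials() would perform a live STS AssumeRole call,
        // so it is omitted here; construction alone only configures the builder.
    }
}
```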
@@ -65,35 +64,58 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode */ @Override public List> getSupportedTypes() { - return Arrays.asList(AWSCredentialsProvider.class); + return Collections.singletonList(AWSCredentialsProvider.class); } - /* + /** * Convert string list to a list of valid credentials providers. */ private static List getValidCredentialsProviders(List providerNames) { List credentialsProviders = new ArrayList<>(); + for (String providerName : providerNames) { - if (providerName.contains(ARG_DELIMITER)) { - String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER); - Class[] argTypes = new Class[nameAndArgs.length - 1]; - Arrays.fill(argTypes, String.class); - try { - Class className = Class.forName(nameAndArgs[0]); - Constructor c = className.getConstructor(argTypes); - credentialsProviders.add((AWSCredentialsProvider) c - .newInstance(Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length))); - } catch (Exception e) { - log.debug("Can't find any credentials provider matching {}.", providerName); + final String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER); + final Class clazz; + try { + final Class c = Class.forName(nameAndArgs[0]); + if (!AWSCredentialsProvider.class.isAssignableFrom(c)) { + continue; } - } else { - try { - Class className = Class.forName(providerName); - credentialsProviders.add((AWSCredentialsProvider) className.newInstance()); - } catch (Exception e) { - log.debug("Can't find any credentials provider matching {}.", providerName); + clazz = (Class) c; + } catch (ClassNotFoundException cnfe) { + // Providers are a product of prefixed Strings to cover multiple + // namespaces (e.g., "Foo" -> { "some.auth.Foo", "kcl.auth.Foo" }). + // It's expected that many class names will not resolve. + continue; + } + log.info("Attempting to construct {}", clazz); + + AWSCredentialsProvider provider = null; + if (nameAndArgs.length > 1) { + final String[] varargs = Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length); + + // attempt to invoke an explicit N-arg constructor of FooClass(String, String, ...) + provider = constructProvider(providerName, () -> { + Class[] argTypes = new Class[nameAndArgs.length - 1]; + Arrays.fill(argTypes, String.class); + return clazz.getConstructor(argTypes).newInstance(varargs); + }); + + if (provider == null) { + // attempt to invoke a public varargs/array constructor of FooClass(String[]) + provider = constructProvider(providerName, () -> clazz.getConstructor(String[].class) + .newInstance((Object) varargs)); } } + + if (provider == null) { + // regardless of parameters, fallback to invoke a public no-arg constructor + provider = constructProvider(providerName, clazz::newInstance); + } + + if (provider != null) { + credentialsProviders.add(provider); + } } return credentialsProviders; } @@ -101,7 +123,7 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode private static List getProviderNames(String property) { // assume list delimiter is "," String[] elements = property.split(LIST_DELIMITER); - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < elements.length; i++) { String string = elements[i].trim(); if (!string.isEmpty()) { @@ -112,20 +134,49 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode return result; } - private static List getPossibleFullClassNames(String s) { - /* - * We take care of three cases : - * - * 1. 
Customer provides a short name of common providers in com.amazonaws.auth package i.e. any classes - * implementing the AWSCredentialsProvider interface: - * http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html - * - * 2. Customer provides a full name of common providers e.g. com.amazonaws.auth.ClasspathFileCredentialsProvider - * - * 3. Customer provides a custom credentials provider with full name of provider - */ + private static List getPossibleFullClassNames(final String provider) { + return Stream.of( + // Customer provides a short name of common providers in com.amazonaws.auth package + // (e.g., any classes implementing the AWSCredentialsProvider interface) + // @see + // http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html + "com.amazonaws.auth.", - return Arrays.asList(s, AUTH_PREFIX + s); + // Customer provides a short name of a provider offered by this multi-lang package + "software.amazon.kinesis.multilang.auth.", + + // Customer provides a fully-qualified provider name, or a custom credentials provider + // (e.g., com.amazonaws.auth.ClasspathFileCredentialsProvider, org.mycompany.FooProvider) + "") + .map(prefix -> prefix + provider) + .collect(Collectors.toList()); } + @FunctionalInterface + private interface CredentialsProviderConstructor { + T construct() + throws IllegalAccessException, InstantiationException, InvocationTargetException, NoSuchMethodException; + } + + /** + * Attempts to construct an {@link AWSCredentialsProvider}. + * + * @param providerName Raw, unmodified provider name. Should there be an + * Exeception during construction, this parameter will be logged. + * @param constructor supplier-like function that will perform the construction + * @return the constructed provider, if successful; otherwise, null + * + * @param type of the CredentialsProvider to construct + */ + private static T constructProvider( + final String providerName, final CredentialsProviderConstructor constructor) { + try { + return constructor.construct(); + } catch (NoSuchMethodException ignored) { + // ignore + } catch (IllegalAccessException | InstantiationException | InvocationTargetException | RuntimeException e) { + log.warn("Failed to construct {}", providerName, e); + } + return null; + } } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java index 2e5502cd..927655c1 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.List; import java.util.Optional; import java.util.function.Function; -import java.util.function.Supplier; import lombok.Getter; import org.apache.commons.beanutils.ConvertUtilsBean; @@ -32,7 +31,7 @@ import org.apache.commons.lang3.StringUtils; public class BuilderDynaBean implements DynaBean { - private static final String[] CLASS_NAME_JOINERS = { ClassUtils.PACKAGE_SEPARATOR, ClassUtils.INNER_CLASS_SEPARATOR }; + private static final String[] CLASS_NAME_JOINERS = {ClassUtils.PACKAGE_SEPARATOR, ClassUtils.INNER_CLASS_SEPARATOR}; static final String NO_MAP_ACCESS_SUPPORT = "Map access isn't supported"; private Class destinedClass; @@ -52,16 +51,22 @@ public 
class BuilderDynaBean implements DynaBean { this(destinedClass, convertUtilsBean, null, Arrays.asList(classPrefixSearchList)); } - public BuilderDynaBean(Class destinedClass, ConvertUtilsBean convertUtilsBean, - Function emptyPropertyHandler, String... classPrefixSearchList) { + public BuilderDynaBean( + Class destinedClass, + ConvertUtilsBean convertUtilsBean, + Function emptyPropertyHandler, + String... classPrefixSearchList) { this(destinedClass, convertUtilsBean, emptyPropertyHandler, Arrays.asList(classPrefixSearchList)); } - public BuilderDynaBean(Class destinedClass, ConvertUtilsBean convertUtilsBean, - Function emtpyPropertyHandler, List classPrefixSearchList) { + public BuilderDynaBean( + Class destinedClass, + ConvertUtilsBean convertUtilsBean, + Function emptyPropertyHandler, + List classPrefixSearchList) { this.convertUtilsBean = convertUtilsBean; this.classPrefixSearchList = classPrefixSearchList; - this.emptyPropertyHandler = emtpyPropertyHandler; + this.emptyPropertyHandler = emptyPropertyHandler; initialize(destinedClass); } @@ -103,7 +108,6 @@ public class BuilderDynaBean implements DynaBean { // Ignored // } - } } } @@ -150,7 +154,6 @@ public class BuilderDynaBean implements DynaBean { } else { return expected.cast(dynaBeanCreateSupport.build()); } - } private void validateResolvedEmptyHandler() { @@ -216,8 +219,10 @@ public class BuilderDynaBean implements DynaBean { validateCanBuildOrCreate(); List types = dynaBeanBuilderSupport.getProperty(name); if (types.size() > 1) { - Optional arrayType = types.stream().filter(t -> t.type.isArray()).findFirst(); - return arrayType.map(t -> new DynaProperty(name, t.type, t.type.getComponentType())) + Optional arrayType = + types.stream().filter(t -> t.type.isArray()).findFirst(); + return arrayType + .map(t -> new DynaProperty(name, t.type, t.type.getComponentType())) .orElseGet(() -> new DynaProperty(name)); } else { TypeTag type = types.get(0); @@ -234,7 +239,8 @@ public class BuilderDynaBean implements DynaBean { @Override public DynaProperty[] getDynaProperties() { validateCanBuildOrCreate(); - return dynaBeanBuilderSupport.getPropertyNames().stream().map(this::getDynaProperty) + return dynaBeanBuilderSupport.getPropertyNames().stream() + .map(this::getDynaProperty) .toArray(DynaProperty[]::new); } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java index 381137eb..eef1e1c2 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java @@ -28,21 +28,21 @@ public @interface ConfigurationSettable { /** * Which builder this option applies to - * + * * @return the class of the builder to use */ Class configurationClass(); /** * The method name on the builder, defaults to the fieldName - * + * * @return the name of the method or null to use the default */ String methodName() default ""; /** * If the type is actually an optional value this will enable conversions - * + * * @return true if the value should be wrapped by an optional */ boolean convertToOptional() default false; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java 
b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java index 390d8b15..c6d58807 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java @@ -23,13 +23,11 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import com.google.common.base.Defaults; +import lombok.NonNull; import org.apache.commons.lang3.ClassUtils; import org.apache.commons.lang3.StringUtils; -import com.google.common.base.Defaults; - -import lombok.NonNull; - public class ConfigurationSettableUtils { public static T resolveFields(@NonNull Object source, @NonNull T configObject) { @@ -40,8 +38,8 @@ public class ConfigurationSettableUtils { return configObject; } - public static void resolveFields(Object source, Map, Object> configObjects, Set> restrictTo, - Set> skipIf) { + public static void resolveFields( + Object source, Map, Object> configObjects, Set> restrictTo, Set> skipIf) { for (Field field : source.getClass().getDeclaredFields()) { for (ConfigurationSettable b : field.getAnnotationsByType(ConfigurationSettable.class)) { if (restrictTo != null && !restrictTo.contains(b.configurationClass())) { @@ -70,9 +68,11 @@ public class ConfigurationSettableUtils { value = Optional.of(value); } if (ClassUtils.isPrimitiveOrWrapper(value.getClass())) { - Class primitiveType = field.getType().isPrimitive() ? field.getType() + Class primitiveType = field.getType().isPrimitive() + ? field.getType() : ClassUtils.wrapperToPrimitive(field.getType()); - Class wrapperType = !field.getType().isPrimitive() ? field.getType() + Class wrapperType = !field.getType().isPrimitive() + ? field.getType() : ClassUtils.primitiveToWrapper(field.getType()); try { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java index fa91aa70..c2b1528e 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java @@ -26,8 +26,7 @@ public class DatePropertyValueDecoder implements IPropertyValueDecoder { /** * Constructor. 
*/ - DatePropertyValueDecoder() { - } + DatePropertyValueDecoder() {} /** * @param value property value as String @@ -49,5 +48,4 @@ public class DatePropertyValueDecoder implements IPropertyValueDecoder { public List> getSupportedTypes() { return Arrays.asList(Date.class); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java index 0cc0073c..97f429f1 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java @@ -29,11 +29,10 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import org.apache.commons.beanutils.ConvertUtilsBean; -import org.apache.commons.lang3.ClassUtils; - import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; +import org.apache.commons.beanutils.ConvertUtilsBean; +import org.apache.commons.lang3.ClassUtils; class DynaBeanBuilderSupport { @@ -48,8 +47,8 @@ class DynaBeanBuilderSupport { private final Multimap properties = HashMultimap.create(); private final Map values = new HashMap<>(); - DynaBeanBuilderSupport(Class destinedClass, ConvertUtilsBean convertUtilsBean, - List classPrefixSearchList) { + DynaBeanBuilderSupport( + Class destinedClass, ConvertUtilsBean convertUtilsBean, List classPrefixSearchList) { this.destinedClass = destinedClass; this.convertUtilsBean = convertUtilsBean; this.classPrefixSearchList = classPrefixSearchList; @@ -103,11 +102,12 @@ class DynaBeanBuilderSupport { private Object createForProperty(String name) { Optional type = properties.get(name).stream().findFirst(); return type.map(t -> { - if (DynaBeanBuilderUtils.isBuilderOrCreate(t.type) || !t.hasConverter) { - return new BuilderDynaBean(t.type, convertUtilsBean, null, classPrefixSearchList); - } - return null; - }).orElse(null); + if (DynaBeanBuilderUtils.isBuilderOrCreate(t.type) || !t.hasConverter) { + return new BuilderDynaBean(t.type, convertUtilsBean, null, classPrefixSearchList); + } + return null; + }) + .orElse(null); } boolean hasValue(String name) { @@ -157,8 +157,11 @@ class DynaBeanBuilderSupport { void set(String name, Object value) { if (value instanceof String && properties.get(name).stream().anyMatch(t -> t.type.isEnum())) { - TypeTag typeTag = properties.get(name).stream().filter(t -> t.type.isEnum()).findFirst().orElseThrow( - () -> new IllegalStateException("Expected enum type for " + name + ", but couldn't find it.")); + TypeTag typeTag = properties.get(name).stream() + .filter(t -> t.type.isEnum()) + .findFirst() + .orElseThrow(() -> + new IllegalStateException("Expected enum type for " + name + ", but couldn't find it.")); Class enumClass = (Class) typeTag.type; values.put(name, Enum.valueOf(enumClass, value.toString())); } else { @@ -174,9 +177,11 @@ class DynaBeanBuilderSupport { private Object getArgument(Map.Entry setValue) { Object argument = setValue.getValue(); if (argument instanceof Object[]) { - TypeTag arrayType = properties.get(setValue.getKey()).stream().filter(t -> t.type.isArray()).findFirst() - .orElseThrow(() -> new IllegalStateException(String - .format("Received Object[] for %s but can't find corresponding type", setValue.getKey()))); + TypeTag arrayType = 
properties.get(setValue.getKey()).stream() + .filter(t -> t.type.isArray()) + .findFirst() + .orElseThrow(() -> new IllegalStateException(String.format( + "Received Object[] for %s but can't find corresponding type", setValue.getKey()))); Object[] arrayValues = (Object[]) argument; Object[] destination = (Object[]) Array.newInstance(arrayType.type.getComponentType(), arrayValues.length); @@ -212,10 +217,12 @@ class DynaBeanBuilderSupport { for (Map.Entry setValue : values.entrySet()) { Object argument = getArgument(setValue); Method mutator = properties.get(setValue.getKey()).stream() - .filter(t -> ClassUtils.isAssignable(argument.getClass(), t.type)).findFirst() - .map(a -> a.builderMethod).orElseThrow( - () -> new IllegalStateException(String.format("Unable to find mutator for %s of type %s", - setValue.getKey(), argument.getClass().getName()))); + .filter(t -> ClassUtils.isAssignable(argument.getClass(), t.type)) + .findFirst() + .map(a -> a.builderMethod) + .orElseThrow(() -> new IllegalStateException(String.format( + "Unable to find mutator for %s of type %s", + setValue.getKey(), argument.getClass().getName()))); try { source = mutator.invoke(source, argument); } catch (IllegalAccessException | InvocationTargetException e) { @@ -236,7 +243,6 @@ class DynaBeanBuilderSupport { } catch (IllegalAccessException | NoSuchMethodException | InvocationTargetException e) { throw new RuntimeException(e); } - } Collection getPropertyNames() { @@ -249,5 +255,4 @@ class DynaBeanBuilderSupport { } return new ArrayList<>(properties.get(name)); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java index 03c6e389..dda0b7ff 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java @@ -30,8 +30,8 @@ class DynaBeanCreateSupport { private final List createTypes = new ArrayList<>(); private Object[] createValues = null; - DynaBeanCreateSupport(Class destinedClass, ConvertUtilsBean convertUtilsBean, - List classPrefixSearchList) { + DynaBeanCreateSupport( + Class destinedClass, ConvertUtilsBean convertUtilsBean, List classPrefixSearchList) { this.destinedClass = destinedClass; this.convertUtilsBean = convertUtilsBean; this.classPrefixSearchList = classPrefixSearchList; @@ -58,8 +58,8 @@ class DynaBeanCreateSupport { Object build() { - Method createMethod = DynaBeanBuilderUtils.getMethod(destinedClass, "create", - createTypes.stream().map(t -> t.type).toArray(i -> new Class[i])); + Method createMethod = DynaBeanBuilderUtils.getMethod( + destinedClass, "create", createTypes.stream().map(t -> t.type).toArray(i -> new Class[i])); Object arguments[] = new Object[createValues.length]; for (int i = 0; i < createValues.length; ++i) { if (createValues[i] instanceof BuilderDynaBean) { @@ -77,8 +77,8 @@ class DynaBeanCreateSupport { return createValues[index]; } else { if (createValues[index] == null) { - createValues[index] = new BuilderDynaBean(createTypes.get(index).type, convertUtilsBean, null, - classPrefixSearchList); + createValues[index] = new BuilderDynaBean( + createTypes.get(index).type, convertUtilsBean, null, classPrefixSearchList); } return createValues[index]; } @@ -89,13 +89,11 @@ class DynaBeanCreateSupport { public void 
set(String name, int index, Object value) { if (StringUtils.isEmpty(name)) { if (index >= createValues.length) { - throw new IllegalArgumentException( - String.format("%d exceeds the maximum number of arguments (%d) for %s", index, - createValues.length, destinedClass.getName())); + throw new IllegalArgumentException(String.format( + "%d exceeds the maximum number of arguments (%d) for %s", + index, createValues.length, destinedClass.getName())); } createValues[index] = value; } - } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java index f9ab7044..a7478100 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java @@ -26,21 +26,28 @@ public class FanoutConfigBean implements RetrievalConfigBuilder { @ConfigurationSettable(configurationClass = FanOutConfig.class) private int maxDescribeStreamSummaryRetries; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private String consumerArn; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private String consumerName; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private int maxDescribeStreamConsumerRetries; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private int registerStreamConsumerRetries; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private long retryBackoffMillis; @Override public FanOutConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent) { - return ConfigurationSettableUtils.resolveFields(this, new FanOutConfig(kinesisAsyncClient).applicationName(parent.getApplicationName()) - .streamName(parent.getStreamName())); + return ConfigurationSettableUtils.resolveFields( + this, + new FanOutConfig(kinesisAsyncClient) + .applicationName(parent.getApplicationName()) + .streamName(parent.getStreamName())); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java index 111d4c63..0498874d 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java @@ -18,13 +18,13 @@ import java.util.List; /** * This class captures the concept of decoding a property value to a particular Java type. - * + * * @param */ interface IPropertyValueDecoder { /** * Get the value that was read from a configuration file and convert it to some type. - * + * * @param propertyValue property string value that needs to be decoded. * @return property value in type T */ @@ -32,7 +32,7 @@ interface IPropertyValueDecoder { /** * Get a list of supported types for this class. - * + * * @return list of supported classes. 
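FanoutConfigBean above shows the @ConfigurationSettable pattern in miniature: annotate a field with the target configuration class, and ConfigurationSettableUtils.resolveFields copies the value onto that target, using the field name as the mutator name unless methodName overrides it. A minimal sketch of the same pattern — RetryConfigBean and its apply method are hypothetical; FanOutConfig's fluent retryBackoffMillis(long) mutator is assumed from the bean shown above:

    import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
    import software.amazon.kinesis.multilang.config.ConfigurationSettable;
    import software.amazon.kinesis.multilang.config.ConfigurationSettableUtils;
    import software.amazon.kinesis.retrieval.fanout.FanOutConfig;

    class RetryConfigBean {
        // Copied onto FanOutConfig by resolveFields; the mutator name defaults
        // to the field name, i.e. FanOutConfig#retryBackoffMillis(long).
        @ConfigurationSettable(configurationClass = FanOutConfig.class)
        private long retryBackoffMillis = 2000L;

        FanOutConfig apply(KinesisAsyncClient client) {
            return ConfigurationSettableUtils.resolveFields(this, new FanOutConfig(client));
        }
    }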
*/ List<Class<T>> getSupportedTypes(); diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java index 88775cee..eb9fd0b8 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java @@ -25,8 +25,7 @@ class IntegerPropertyValueDecoder implements IPropertyValueDecoder<Integer> { /** * Constructor. */ - IntegerPropertyValueDecoder() { - } + IntegerPropertyValueDecoder() {} /** * @param value property value as String diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java index f3facdc0..42b617a0 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java @@ -19,12 +19,12 @@ import java.io.InputStream; import java.lang.reflect.InvocationTargetException; import java.util.Properties; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; - -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.arns.Arn; +import software.amazon.kinesis.common.StreamIdentifier; /** * KinesisClientLibConfigurator constructs a KinesisClientLibConfiguration from a Java properties file. The following @@ -40,7 +40,6 @@ public class KinesisClientLibConfigurator { private final BeanUtilsBean utilsBean; private final MultiLangDaemonConfiguration configuration; - /** * Constructor. */ @@ -55,13 +54,14 @@ public class KinesisClientLibConfigurator { * The program will fail immediately if the customer provides an invalid variable value. The program will log a warning and * continue if the customer provides: 1) a variable with an unsupported variable type, or 2) a variable with a name that does not * match any of the variables in KinesisClientLibConfiguration.
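IPropertyValueDecoder is a small contract: decodeValue turns the raw property string into a T, and getSupportedTypes advertises which classes the decoder can produce. A sketch of a custom decoder under that contract — BooleanPropertyValueDecoder is illustrative, not part of this patch, and since the interface is package-private a real implementation would live in the same package:

    import java.util.Arrays;
    import java.util.List;

    class BooleanPropertyValueDecoder implements IPropertyValueDecoder<Boolean> {
        @Override
        public Boolean decodeValue(String value) {
            // Raw property strings may carry surrounding whitespace.
            return Boolean.parseBoolean(value.trim());
        }

        @Override
        public List<Class<Boolean>> getSupportedTypes() {
            return Arrays.asList(Boolean.class);
        }
    }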
- * + * * @param properties a Properties object containing the configuration information * @return KinesisClientLibConfiguration */ public MultiLangDaemonConfiguration getConfiguration(Properties properties) { properties.entrySet().forEach(e -> { try { + log.info("Processing (key={}, value={})", e.getKey(), e.getValue()); utilsBean.setProperty(configuration, (String) e.getKey(), e.getValue()); } catch (IllegalAccessException | InvocationTargetException ex) { throw new RuntimeException(ex); @@ -69,8 +69,23 @@ public class KinesisClientLibConfigurator { }); Validate.notBlank(configuration.getApplicationName(), "Application name is required"); - Validate.notBlank(configuration.getStreamName(), "Stream name is required"); - Validate.isTrue(configuration.getKinesisCredentialsProvider().isDirty(), "A basic set of AWS credentials must be provided"); + + if (configuration.getStreamArn() != null + && !configuration.getStreamArn().trim().isEmpty()) { + final Arn streamArnObj = Arn.fromString(configuration.getStreamArn()); + StreamIdentifier.validateArn(streamArnObj); + // Parse out the stream Name from the Arn (and/or override existing value for Stream Name) + final String streamNameFromArn = streamArnObj.resource().resource(); + configuration.setStreamName(streamNameFromArn); + } + + Validate.notBlank( + configuration.getStreamName(), + "Stream name or Stream Arn is required. Stream Arn takes precedence if both are passed in."); + Validate.isTrue( + configuration.getKinesisCredentialsProvider().isDirty(), + "A basic set of AWS credentials must be provided"); + return configuration; } @@ -95,6 +110,4 @@ public class KinesisClientLibConfigurator { } return getConfiguration(properties); } - - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java index da280ddf..3336be88 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java @@ -27,18 +27,16 @@ import java.util.Set; import java.util.UUID; import java.util.function.Function; -import org.apache.commons.beanutils.BeanUtilsBean; -import org.apache.commons.beanutils.ConvertUtils; -import org.apache.commons.beanutils.ConvertUtilsBean; -import org.apache.commons.beanutils.Converter; -import org.apache.commons.beanutils.converters.ArrayConverter; -import org.apache.commons.beanutils.converters.StringConverter; - import lombok.Data; import lombok.Getter; import lombok.Setter; import lombok.experimental.Delegate; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.beanutils.BeanUtilsBean; +import org.apache.commons.beanutils.ConvertUtilsBean; +import org.apache.commons.beanutils.Converter; +import org.apache.commons.beanutils.converters.ArrayConverter; +import org.apache.commons.beanutils.converters.StringConverter; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; @@ -73,6 +71,7 @@ public class MultiLangDaemonConfiguration { private String applicationName; private String streamName; + private String streamArn; @ConfigurationSettable(configurationClass = ConfigsBuilder.class) private String tableName; @@ -85,20 
+84,37 @@ public class MultiLangDaemonConfiguration { @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private long failoverTimeMillis; + + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) + private Boolean enablePriorityLeaseAssignment; + + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) + private Boolean leaseTableDeletionProtectionEnabled; + + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) + private Boolean leaseTablePitrEnabled; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private long shardSyncIntervalMillis; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private boolean cleanupLeasesUponShardCompletion; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private boolean ignoreUnexpectedChildShards; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxLeasesForWorker; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxLeasesToStealAtOneTime; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int initialLeaseTableReadCapacity; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int initialLeaseTableWriteCapacity; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class, methodName = "initialPositionInStream") @ConfigurationSettable(configurationClass = RetrievalConfig.class) private InitialPositionInStreamExtended initialPositionInStreamExtended; @@ -111,14 +127,16 @@ public class MultiLangDaemonConfiguration { } public void setInitialPositionInStream(InitialPositionInStream initialPositionInStream) { - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); + this.initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream); } @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxLeaseRenewalThreads; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private long listShardsBackoffTimeInMillis; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxListShardsRetryAttempts; @@ -128,10 +146,13 @@ public class MultiLangDaemonConfiguration { @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private long parentShardPollIntervalMillis; + @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private ShardPrioritization shardPrioritization; + @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private boolean skipShardSyncAtWorkerInitializationIfLeasesExist; + @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private long schedulerInitializationBackoffTimeMillis; @@ -140,12 +161,16 @@ public class MultiLangDaemonConfiguration { @ConfigurationSettable(configurationClass = MetricsConfig.class) private long metricsBufferTimeMillis; + @ConfigurationSettable(configurationClass = MetricsConfig.class) private int metricsMaxQueueSize; + @ConfigurationSettable(configurationClass = MetricsConfig.class) private MetricsLevel metricsLevel; + @ConfigurationSettable(configurationClass = LifecycleConfig.class, convertToOptional = true) private Long logWarningForTaskAfterMillis; + @ConfigurationSettable(configurationClass = MetricsConfig.class) private Set metricsEnabledDimensions; @@ -157,10 +182,10 @@ 
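The streamArn handling in getConfiguration above leans on the SDK's Arn type: for a Kinesis stream ARN, the resource portion after "stream/" is the stream name, which is why the configurator can backfill streamName from streamArn. The extraction in isolation (the ARN value here is made up):

    import software.amazon.awssdk.arns.Arn;

    Arn streamArn = Arn.fromString("arn:aws:kinesis:us-east-2:012345678987:stream/myStream");
    String streamName = streamArn.resource().resource(); // "myStream"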
public class MultiLangDaemonConfiguration { metricsEnabledDimensions = new HashSet<>(Arrays.asList(dimensions)); } - private RetrievalMode retrievalMode = RetrievalMode.DEFAULT; private final FanoutConfigBean fanoutConfig = new FanoutConfigBean(); + @Delegate(types = PollingConfigBean.PollingConfigBeanDelegate.class) private final PollingConfigBean pollingConfig = new PollingConfigBean(); @@ -169,7 +194,6 @@ public class MultiLangDaemonConfiguration { private long shutdownGraceMillis; private Integer timeoutInSeconds; - private final BuilderDynaBean kinesisCredentialsProvider; public void setAWSCredentialsProvider(String providerString) { @@ -199,61 +223,75 @@ public class MultiLangDaemonConfiguration { this.utilsBean = utilsBean; this.convertUtilsBean = convertUtilsBean; - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - Date date = new Date(Long.parseLong(value.toString()) * 1000L); - return type.cast(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(date)); - } - }, InitialPositionInStreamExtended.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + Date date = new Date(Long.parseLong(value.toString()) * 1000L); + return type.cast(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(date)); + } + }, + InitialPositionInStreamExtended.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(MetricsLevel.valueOf(value.toString().toUpperCase())); - } - }, MetricsLevel.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast(MetricsLevel.valueOf(value.toString().toUpperCase())); + } + }, + MetricsLevel.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(InitialPositionInStream.valueOf(value.toString().toUpperCase())); - } - }, InitialPositionInStream.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast( + InitialPositionInStream.valueOf(value.toString().toUpperCase())); + } + }, + InitialPositionInStream.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(URI.create(value.toString())); - } - }, URI.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast(URI.create(value.toString())); + } + }, + URI.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(RetrievalMode.from(value.toString())); - } - }, RetrievalMode.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast(RetrievalMode.from(value.toString())); + } + }, + RetrievalMode.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(final Class type, final Object value) { - return type.cast(Region.of(value.toString())); - } - }, Region.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(final Class type, final Object value) { + return type.cast(Region.of(value.toString())); + } + }, + Region.class); ArrayConverter arrayConverter = new ArrayConverter(String[].class, new StringConverter()); 
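Each anonymous Converter above is registered the same way: convertUtilsBean.register(converter, targetClass), after which BeanUtils can coerce string property values into that type when setProperty runs. A standalone sketch of one such registration — java.time.Duration is an illustrative target type; the patch itself registers no Duration converter:

    import java.time.Duration;
    import org.apache.commons.beanutils.ConvertUtilsBean;
    import org.apache.commons.beanutils.Converter;

    ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
    convertUtilsBean.register(
            new Converter() {
                @Override
                public <T> T convert(Class<T> type, Object value) {
                    // Property values arrive as strings, e.g. "30000".
                    return type.cast(Duration.ofMillis(Long.parseLong(value.toString())));
                }
            },
            Duration.class);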
arrayConverter.setDelimiter(','); convertUtilsBean.register(arrayConverter, String[].class); - AWSCredentialsProviderPropertyValueDecoder oldCredentialsDecoder = new AWSCredentialsProviderPropertyValueDecoder(); + AWSCredentialsProviderPropertyValueDecoder oldCredentialsDecoder = + new AWSCredentialsProviderPropertyValueDecoder(); Function converter = s -> new V2CredentialWrapper(oldCredentialsDecoder.decodeValue(s)); - this.kinesisCredentialsProvider = new BuilderDynaBean(AwsCredentialsProvider.class, convertUtilsBean, - converter, CREDENTIALS_DEFAULT_SEARCH_PATH); - this.dynamoDBCredentialsProvider = new BuilderDynaBean(AwsCredentialsProvider.class, convertUtilsBean, - converter, CREDENTIALS_DEFAULT_SEARCH_PATH); - this.cloudWatchCredentialsProvider = new BuilderDynaBean(AwsCredentialsProvider.class, convertUtilsBean, - converter, CREDENTIALS_DEFAULT_SEARCH_PATH); + this.kinesisCredentialsProvider = new BuilderDynaBean( + AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH); + this.dynamoDBCredentialsProvider = new BuilderDynaBean( + AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH); + this.cloudWatchCredentialsProvider = new BuilderDynaBean( + AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH); this.kinesisClient = new BuilderDynaBean(KinesisAsyncClient.class, convertUtilsBean); this.dynamoDbClient = new BuilderDynaBean(DynamoDbAsyncClient.class, convertUtilsBean); @@ -299,8 +337,8 @@ public class MultiLangDaemonConfiguration { return credsBuilder.build(AwsCredentialsProvider.class); } - private void updateCredentials(BuilderDynaBean toUpdate, AwsCredentialsProvider primary, - AwsCredentialsProvider secondary) { + private void updateCredentials( + BuilderDynaBean toUpdate, AwsCredentialsProvider primary, AwsCredentialsProvider secondary) { if (toUpdate.hasValue("credentialsProvider")) { return; @@ -328,8 +366,8 @@ public class MultiLangDaemonConfiguration { } private void handleRetrievalConfig(RetrievalConfig retrievalConfig, ConfigsBuilder configsBuilder) { - retrievalConfig - .retrievalSpecificConfig(retrievalMode.builder(this).build(configsBuilder.kinesisClient(), this)); + retrievalConfig.retrievalSpecificConfig( + retrievalMode.builder(this).build(configsBuilder.kinesisClient(), this)); } private Object adjustKinesisHttpConfiguration(Object builderObj) { @@ -352,8 +390,14 @@ public class MultiLangDaemonConfiguration { final RetrievalConfig retrievalConfig; public Scheduler build() { - return new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + return new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); } } @@ -366,19 +410,25 @@ public class MultiLangDaemonConfiguration { updateCredentials(dynamoDbClient, dynamoDbCreds, kinesisCreds); updateCredentials(cloudWatchClient, cloudwatchCreds, kinesisCreds); - KinesisAsyncClient kinesisAsyncClient = kinesisClient.build(KinesisAsyncClient.class, - this::adjustKinesisHttpConfiguration); + KinesisAsyncClient kinesisAsyncClient = + kinesisClient.build(KinesisAsyncClient.class, this::adjustKinesisHttpConfiguration); DynamoDbAsyncClient dynamoDbAsyncClient = dynamoDbClient.build(DynamoDbAsyncClient.class); CloudWatchAsyncClient cloudWatchAsyncClient = cloudWatchClient.build(CloudWatchAsyncClient.class); - ConfigsBuilder configsBuilder 
= new ConfigsBuilder(streamName, applicationName, kinesisAsyncClient, - dynamoDbAsyncClient, cloudWatchAsyncClient, workerIdentifier, shardRecordProcessorFactory); + ConfigsBuilder configsBuilder = new ConfigsBuilder( + streamName, + applicationName, + kinesisAsyncClient, + dynamoDbAsyncClient, + cloudWatchAsyncClient, + workerIdentifier, + shardRecordProcessorFactory); Map, Object> configObjects = new HashMap<>(); addConfigObjects(configObjects, configsBuilder); - resolveFields(configObjects, Collections.singleton(ConfigsBuilder.class), - Collections.singleton(PollingConfig.class)); + resolveFields( + configObjects, Collections.singleton(ConfigsBuilder.class), Collections.singleton(PollingConfig.class)); CoordinatorConfig coordinatorConfig = configsBuilder.coordinatorConfig(); CheckpointConfig checkpointConfig = configsBuilder.checkpointConfig(); @@ -388,19 +438,31 @@ public class MultiLangDaemonConfiguration { ProcessorConfig processorConfig = configsBuilder.processorConfig(); RetrievalConfig retrievalConfig = configsBuilder.retrievalConfig(); - addConfigObjects(configObjects, coordinatorConfig, checkpointConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + addConfigObjects( + configObjects, + coordinatorConfig, + checkpointConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); handleRetrievalConfig(retrievalConfig, configsBuilder); resolveFields(configObjects, null, new HashSet<>(Arrays.asList(ConfigsBuilder.class, PollingConfig.class))); - return new ResolvedConfiguration(coordinatorConfig, checkpointConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + return new ResolvedConfiguration( + coordinatorConfig, + checkpointConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); } public Scheduler build(ShardRecordProcessorFactory shardRecordProcessorFactory) { return resolvedConfiguration(shardRecordProcessorFactory).build(); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java index 176efbad..64f24b16 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java @@ -30,34 +30,44 @@ public class PollingConfigBean implements RetrievalConfigBuilder { interface PollingConfigBeanDelegate { Integer getRetryGetRecordsInSeconds(); + void setRetryGetRecordsInSeconds(Integer value); Integer getMaxGetRecordsThreadPool(); + void setMaxGetRecordsThreadPool(Integer value); long getIdleTimeBetweenReadsInMillis(); + void setIdleTimeBetweenReadsInMillis(long value); int getMaxRecords(); + void setMaxRecords(int value); } @ConfigurationSettable(configurationClass = PollingConfig.class, convertToOptional = true) private Integer retryGetRecordsInSeconds; + @ConfigurationSettable(configurationClass = PollingConfig.class, convertToOptional = true) private Integer maxGetRecordsThreadPool; + @ConfigurationSettable(configurationClass = PollingConfig.class) private long idleTimeBetweenReadsInMillis; + @ConfigurationSettable(configurationClass = PollingConfig.class) private int maxRecords; public boolean anyPropertiesSet() { - return retryGetRecordsInSeconds 
!= null || maxGetRecordsThreadPool != null || idleTimeBetweenReadsInMillis != 0 || maxRecords != 0; + return retryGetRecordsInSeconds != null + || maxGetRecordsThreadPool != null + || idleTimeBetweenReadsInMillis != 0 + || maxRecords != 0; } @Override public PollingConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent) { - return ConfigurationSettableUtils.resolveFields(this, new PollingConfig(parent.getStreamName(), kinesisAsyncClient)); + return ConfigurationSettableUtils.resolveFields( + this, new PollingConfig(parent.getStreamName(), kinesisAsyncClient)); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java index 36794a99..7fa9ff9d 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java @@ -21,7 +21,7 @@ import software.amazon.kinesis.retrieval.RetrievalSpecificConfig; public interface RetrievalConfigBuilder { /** * Creates a retrieval specific configuration using the supplied parameters, and internal class parameters - * + * * @param kinesisAsyncClient * the client that will be provided to the RetrievalSpecificConfig constructor * @param parent diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java index bf65fffb..ebe6dcd1 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java @@ -19,14 +19,14 @@ import java.util.Arrays; import java.util.function.Function; import java.util.stream.Collectors; -import org.apache.commons.lang3.Validate; - import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.Validate; @Slf4j public enum RetrievalMode { - FANOUT(MultiLangDaemonConfiguration::getFanoutConfig), POLLING( - MultiLangDaemonConfiguration::getPollingConfig), DEFAULT(RetrievalMode::decideForDefault); + FANOUT(MultiLangDaemonConfiguration::getFanoutConfig), + POLLING(MultiLangDaemonConfiguration::getPollingConfig), + DEFAULT(RetrievalMode::decideForDefault); private final Function builderFor; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java index ee630ecf..dc359b91 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java @@ -15,14 +15,13 @@ package software.amazon.kinesis.multilang.config; -import lombok.Data; - import java.lang.reflect.Method; +import lombok.Data; + @Data class TypeTag { final Class type; final boolean hasConverter; final Method builderMethod; - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java 
b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java index d0afdf75..e1b6072a 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java @@ -32,7 +32,10 @@ public class V2CredentialWrapper implements AwsCredentialsProvider { public AwsCredentials resolveCredentials() { AWSCredentials current = oldCredentialsProvider.getCredentials(); if (current instanceof AWSSessionCredentials) { - return AwsSessionCredentials.create(current.getAWSAccessKeyId(), current.getAWSSecretKey(), ((AWSSessionCredentials) current).getSessionToken()); + return AwsSessionCredentials.create( + current.getAWSAccessKeyId(), + current.getAWSSecretKey(), + ((AWSSessionCredentials) current).getSessionToken()); } return new AwsCredentials() { @Override diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java index b738dcd7..6413d161 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java @@ -36,6 +36,7 @@ public class CheckpointMessage extends Message { * The checkpoint this message is about. */ private String sequenceNumber; + private Long subSequenceNumber; /** @@ -45,7 +46,7 @@ public class CheckpointMessage extends Message { /** * Convenience constructor. - * + * * @param sequenceNumber * The sequence number that this message is about. * @param subSequenceNumber @@ -61,5 +62,4 @@ public class CheckpointMessage extends Message { this.setError(throwable.getClass().getSimpleName()); } } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java index a04c639e..b6b12955 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java @@ -33,29 +33,29 @@ public class InitializeMessage extends Message { * The shard id that this processor is getting initialized for. */ private String shardId; + private String sequenceNumber; private Long subSequenceNumber; /** * Default constructor. */ - public InitializeMessage() { - } + public InitializeMessage() {} /** * Convenience constructor. 
- * + * * @param initializationInput {@link InitializationInput} */ public InitializeMessage(InitializationInput initializationInput) { this.shardId = initializationInput.shardId(); if (initializationInput.extendedSequenceNumber() != null) { this.sequenceNumber = initializationInput.extendedSequenceNumber().sequenceNumber(); - this.subSequenceNumber = initializationInput.extendedSequenceNumber().subSequenceNumber(); + this.subSequenceNumber = + initializationInput.extendedSequenceNumber().subSequenceNumber(); } else { this.sequenceNumber = null; this.subSequenceNumber = null; } } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java index ca020825..a3a09f22 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java @@ -15,7 +15,6 @@ package software.amazon.kinesis.multilang.messages; import com.fasterxml.jackson.annotation.JsonProperty; - import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import lombok.Getter; @@ -54,10 +53,11 @@ public class JsonFriendlyRecord { data = new byte[record.data().limit()]; record.data().get(data); } - Long approximateArrival = record.approximateArrivalTimestamp() == null ? null + Long approximateArrival = record.approximateArrivalTimestamp() == null + ? null : record.approximateArrivalTimestamp().toEpochMilli(); - return new JsonFriendlyRecord(data, record.partitionKey(), record.sequenceNumber(), - approximateArrival, record.subSequenceNumber()); + return new JsonFriendlyRecord( + data, record.partitionKey(), record.sequenceNumber(), approximateArrival, record.subSequenceNumber()); } @JsonProperty diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java index aea0677f..ccf269b5 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java @@ -21,5 +21,4 @@ package software.amazon.kinesis.multilang.messages; public class LeaseLostMessage extends Message { public static final String ACTION = "leaseLost"; - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java index bdb89181..75fc1c68 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java @@ -23,15 +23,15 @@ import com.fasterxml.jackson.databind.ObjectMapper; * Abstract class for all messages that are sent to the client's process. 
*/ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "action") -@JsonSubTypes({ - @Type(value = CheckpointMessage.class, name = CheckpointMessage.ACTION), - @Type(value = InitializeMessage.class, name = InitializeMessage.ACTION), - @Type(value = ProcessRecordsMessage.class, name = ProcessRecordsMessage.ACTION), - @Type(value = ShutdownMessage.class, name = ShutdownMessage.ACTION), - @Type(value = StatusMessage.class, name = StatusMessage.ACTION), - @Type(value = ShutdownRequestedMessage.class, name = ShutdownRequestedMessage.ACTION), - @Type(value = LeaseLostMessage.class, name = LeaseLostMessage.ACTION), - @Type(value = ShardEndedMessage.class, name = ShardEndedMessage.ACTION), +@JsonSubTypes({ + @Type(value = CheckpointMessage.class, name = CheckpointMessage.ACTION), + @Type(value = InitializeMessage.class, name = InitializeMessage.ACTION), + @Type(value = ProcessRecordsMessage.class, name = ProcessRecordsMessage.ACTION), + @Type(value = ShutdownMessage.class, name = ShutdownMessage.ACTION), + @Type(value = StatusMessage.class, name = StatusMessage.ACTION), + @Type(value = ShutdownRequestedMessage.class, name = ShutdownRequestedMessage.ACTION), + @Type(value = LeaseLostMessage.class, name = LeaseLostMessage.ACTION), + @Type(value = ShardEndedMessage.class, name = ShardEndedMessage.ACTION), }) public abstract class Message { @@ -40,11 +40,10 @@ public abstract class Message { /** * Default constructor. */ - public Message() { - } + public Message() {} /** - * + * * @param objectMapper An object mapper. * @return this */ @@ -54,7 +53,7 @@ public abstract class Message { } /** - * + * * @return A JSON representation of this object. */ public String toString() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java index 95601b2b..50eed164 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java @@ -37,17 +37,17 @@ public class ProcessRecordsMessage extends Message { * The records that the client's process needs to handle. */ private List records; + private Long millisBehindLatest; /** * Default constructor. */ - public ProcessRecordsMessage() { - } + public ProcessRecordsMessage() {} /** * Convenience constructor. - * + * * @param processRecordsInput * the process records input to be sent to the child */ diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java index af905f90..a1b4561a 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java @@ -142,7 +142,6 @@ * Jackson doc for more details) MIME is the basis of most base64 encoding variants including RFC 3548 which is the standard used by Python's base64 module. 
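The @JsonTypeInfo/@JsonSubTypes wiring on Message means the "action" JSON field selects the concrete subtype in both directions, which is the backbone of the multilang wire protocol. A quick sketch of deserialization — the input string is illustrative, and JsonProcessingException handling is omitted:

    import com.fasterxml.jackson.databind.ObjectMapper;

    ObjectMapper mapper = new ObjectMapper();
    // "action":"leaseLost" resolves to LeaseLostMessage via the @JsonSubTypes table.
    Message message = mapper.readValue("{\"action\":\"leaseLost\"}", Message.class);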
- * + * */ package software.amazon.kinesis.multilang; - diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java index b357c16b..2fd4fcb7 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java @@ -14,15 +14,14 @@ */ package software.amazon.kinesis.multilang; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; - import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; - -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; import software.amazon.kinesis.lifecycle.events.InitializationInput; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; public class Matchers { @@ -58,8 +57,12 @@ public class Matchers { @Override public void describeTo(Description description) { - description.appendText("An InitializationInput matching: { shardId: ").appendDescriptionOf(shardIdMatcher) - .appendText(", sequenceNumber: ").appendDescriptionOf(sequenceNumberMatcher).appendText(" }"); + description + .appendText("An InitializationInput matching: { shardId: ") + .appendDescriptionOf(shardIdMatcher) + .appendText(", sequenceNumber: ") + .appendDescriptionOf(sequenceNumberMatcher) + .appendText(" }"); } } @@ -98,10 +101,11 @@ public class Matchers { @Override public void describeTo(Description description) { - description.appendText("An ExtendedSequenceNumber matching: { sequenceNumber: ") - .appendDescriptionOf(sequenceNumberMatcher).appendText(", subSequenceNumber: ") + description + .appendText("An ExtendedSequenceNumber matching: { sequenceNumber: ") + .appendDescriptionOf(sequenceNumberMatcher) + .appendText(", subSequenceNumber: ") .appendDescriptionOf(subSequenceNumberMatcher); } } - } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java index b6541227..3ffcac14 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java @@ -22,22 +22,20 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; - -import software.amazon.kinesis.multilang.MessageReader; import software.amazon.kinesis.multilang.messages.Message; import software.amazon.kinesis.multilang.messages.StatusMessage; -import com.fasterxml.jackson.databind.ObjectMapper; public class MessageReaderTest { - private static final String shardId = "shard-123"; + private static final String SHARD_ID = "shard-123"; - /* + /** * This line is based on the definition of the protocol for communication between the KCL record processor and * the client's process. 
*/ @@ -45,7 +43,7 @@ public class MessageReaderTest { return String.format("{\"action\":\"checkpoint\", \"checkpoint\":\"%s\"}", sequenceNumber); } - /* + /** * This line is based on the definition of the protocol for communication between the KCL record processor and * the client's process. */ @@ -76,18 +74,19 @@ public class MessageReaderTest { @Test public void runLoopGoodInputTest() { - String[] sequenceNumbers = new String[] { "123", "456", "789" }; - String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" }; + String[] sequenceNumbers = new String[] {"123", "456", "789"}; + String[] responseFors = new String[] {"initialize", "processRecords", "processRecords", "shutdown"}; InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors); MessageReader reader = - new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool()); + new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); for (String responseFor : responseFors) { - StatusMessage statusMessage = null; try { Message message = reader.getNextMessageFromSTDOUT().get(); if (message instanceof StatusMessage) { - Assert.assertEquals("The status message's responseFor field should have been correct", responseFor, + Assert.assertEquals( + "The status message's responseFor field should have been correct", + responseFor, ((StatusMessage) message).getResponseFor()); } } catch (InterruptedException | ExecutionException e) { @@ -98,19 +97,19 @@ public class MessageReaderTest { @Test public void drainInputTest() throws InterruptedException, ExecutionException { - String[] sequenceNumbers = new String[] { "123", "456", "789" }; - String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" }; + String[] sequenceNumbers = new String[] {"123", "456", "789"}; + String[] responseFors = new String[] {"initialize", "processRecords", "processRecords", "shutdown"}; InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors); MessageReader reader = - new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool()); + new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); Future drainFuture = reader.drainSTDOUT(); Boolean drainResult = drainFuture.get(); Assert.assertNotNull(drainResult); Assert.assertTrue(drainResult); } - /* + /** * readValue should fail safely and just continue looping */ @Test @@ -118,25 +117,26 @@ public class MessageReaderTest { BufferedReader bufferReader = Mockito.mock(BufferedReader.class); try { Mockito.doAnswer(new Answer() { - private boolean returnedOnce = false; + private boolean returnedOnce = false; - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - if (returnedOnce) { - return "{\"action\":\"status\",\"responseFor\":\"processRecords\"}"; - } else { - returnedOnce = true; - return "{\"action\":\"shutdown\",\"reason\":\"ZOMBIE\"}"; - } - } - }).when(bufferReader).readLine(); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + if (returnedOnce) { + return "{\"action\":\"status\",\"responseFor\":\"processRecords\"}"; + } else { + returnedOnce = true; + return "{\"action\":\"shutdown\",\"reason\":\"ZOMBIE\"}"; + } + } + }) + .when(bufferReader) + .readLine(); } catch (IOException e) { Assert.fail("There shouldn't be an exception while setting up this mock."); } - 
MessageReader reader = - new MessageReader().initialize(bufferReader, shardId, new ObjectMapper(), - Executors.newCachedThreadPool()); + MessageReader reader = new MessageReader() + .initialize(bufferReader, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); try { reader.getNextMessageFromSTDOUT().get(); @@ -150,7 +150,7 @@ public class MessageReaderTest { public void messageReaderBuilderTest() { InputStream stream = new ByteArrayInputStream("".getBytes()); MessageReader reader = - new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool()); + new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); Assert.assertNotNull(reader); } @@ -159,7 +159,7 @@ public class MessageReaderTest { BufferedReader input = Mockito.mock(BufferedReader.class); Mockito.doThrow(IOException.class).when(input).readLine(); MessageReader reader = - new MessageReader().initialize(input, shardId, new ObjectMapper(), Executors.newCachedThreadPool()); + new MessageReader().initialize(input, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); Future readTask = reader.getNextMessageFromSTDOUT(); @@ -167,7 +167,8 @@ public class MessageReaderTest { readTask.get(); Assert.fail("The reading task should have failed due to an IOException."); } catch (InterruptedException e) { - Assert.fail("The reading task should not have been interrupted. It should have failed due to an IOException."); + Assert.fail( + "The reading task should not have been interrupted. It should have failed due to an IOException."); } catch (ExecutionException e) { // Yay!! } @@ -177,7 +178,7 @@ public class MessageReaderTest { public void noMoreMessagesTest() throws InterruptedException { InputStream stream = new ByteArrayInputStream("".getBytes()); MessageReader reader = - new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool()); + new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); Future future = reader.getNextMessageFromSTDOUT(); try { diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java index 6a0c06b4..90481b6c 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java @@ -23,44 +23,37 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mockito; - -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.multilang.MessageWriter; -import software.amazon.kinesis.multilang.messages.LeaseLostMessage; -import software.amazon.kinesis.multilang.messages.Message; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; - import software.amazon.kinesis.lifecycle.events.InitializationInput; +import 
software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; +import software.amazon.kinesis.lifecycle.events.ShardEndedInput; +import software.amazon.kinesis.multilang.messages.Message; import software.amazon.kinesis.retrieval.KinesisClientRecord; import static org.mockito.Mockito.verify; public class MessageWriterTest { - private static final String shardId = "shard-123"; + private static final String SHARD_ID = "shard-123"; MessageWriter messageWriter; OutputStream stream; @Rule public final ExpectedException thrown = ExpectedException.none(); - // ExecutorService executor; - @Before public void setup() { stream = Mockito.mock(OutputStream.class); messageWriter = - new MessageWriter().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool()); + new MessageWriter().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); } /* @@ -70,8 +63,7 @@ public class MessageWriterTest { public void writeCheckpointMessageNoErrorTest() throws IOException, InterruptedException, ExecutionException { Future future = this.messageWriter.writeCheckpointMessageWithError("1234", 0L, null); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @@ -79,42 +71,43 @@ public class MessageWriterTest { public void writeCheckpointMessageWithErrorTest() throws IOException, InterruptedException, ExecutionException { Future future = this.messageWriter.writeCheckpointMessageWithError("1234", 0L, new Throwable()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void writeInitializeMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build()); + Future future = this.messageWriter.writeInitializeMessage( + InitializationInput.builder().shardId(SHARD_ID).build()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void writeProcessRecordsMessageTest() throws IOException, InterruptedException, ExecutionException { List records = Arrays.asList( - KinesisClientRecord.builder().data(ByteBuffer.wrap("kitten".getBytes())).partitionKey("some cats") - .sequenceNumber("357234807854789057805").build(), - KinesisClientRecord.builder().build() - ); - Future future = this.messageWriter.writeProcessRecordsMessage(ProcessRecordsInput.builder().records(records).build()); + KinesisClientRecord.builder() + .data(ByteBuffer.wrap("kitten".getBytes())) + .partitionKey("some cats") + .sequenceNumber("357234807854789057805") + .build(), + KinesisClientRecord.builder().build()); + Future future = this.messageWriter.writeProcessRecordsMessage( + 
ProcessRecordsInput.builder().records(records).build()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void writeShutdownMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeShardEndedMessage(ShardEndedInput.builder().build()); + Future future = this.messageWriter.writeShardEndedMessage( + ShardEndedInput.builder().build()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @@ -123,28 +116,28 @@ public class MessageWriterTest { Future future = this.messageWriter.writeShutdownRequestedMessage(); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void streamIOExceptionTest() throws IOException, InterruptedException, ExecutionException { Mockito.doThrow(IOException.class).when(stream).flush(); - Future initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build()); + Future initializeTask = this.messageWriter.writeInitializeMessage( + InitializationInput.builder().shardId(SHARD_ID).build()); Boolean result = initializeTask.get(); Assert.assertNotNull(result); Assert.assertFalse(result); } @Test - public void objectMapperFails() throws JsonProcessingException, InterruptedException, ExecutionException { + public void objectMapperFails() throws JsonProcessingException { thrown.expect(RuntimeException.class); thrown.expectMessage("Encountered I/O error while writing LeaseLostMessage action to subprocess"); ObjectMapper mapper = Mockito.mock(ObjectMapper.class); Mockito.doThrow(JsonProcessingException.class).when(mapper).writeValueAsString(Mockito.any(Message.class)); - messageWriter = new MessageWriter().initialize(stream, shardId, mapper, Executors.newCachedThreadPool()); + messageWriter = new MessageWriter().initialize(stream, SHARD_ID, mapper, Executors.newCachedThreadPool()); messageWriter.writeLeaseLossMessage(LeaseLostInput.builder().build()); } @@ -157,7 +150,8 @@ public class MessageWriterTest { Assert.assertFalse(this.messageWriter.isOpen()); try { // Any message should fail - this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build()); + this.messageWriter.writeInitializeMessage( + InitializationInput.builder().shardId(SHARD_ID).build()); Assert.fail("MessageWriter should be closed and unable to write."); } catch (IllegalStateException e) { // This should happen. 
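The two test classes above exercise both halves of the multilang wire protocol: MessageWriter serializes an action message (initialize, processRecords, checkpoint, leaseLost, shardEnded, shutdownRequested) as JSON onto the child process's STDIN, and MessageReader parses the child's status reply from its STDOUT. The following is a minimal sketch of that round trip, not part of this patch: it assumes a hypothetical child command ("./my_processor.py") and that MessageWriter and MessageReader are visible to your package the way they are to these same-package tests, using only the initialize(...) and write/read signatures the tests themselves call.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import com.fasterxml.jackson.databind.ObjectMapper;
    import software.amazon.kinesis.lifecycle.events.InitializationInput;
    import software.amazon.kinesis.multilang.MessageReader;
    import software.amazon.kinesis.multilang.MessageWriter;

    public class MultiLangHandshakeSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical record-processor subprocess; any executable speaking the protocol works.
            Process child = new ProcessBuilder("./my_processor.py").start();
            ObjectMapper mapper = new ObjectMapper();
            ExecutorService executor = Executors.newCachedThreadPool();

            // The writer pushes the "initialize" action onto the child's STDIN as JSON.
            MessageWriter writer = new MessageWriter()
                    .initialize(child.getOutputStream(), "shard-123", mapper, executor);
            writer.writeInitializeMessage(
                    InitializationInput.builder().shardId("shard-123").build()).get();

            // The reader awaits the child's reply on STDOUT; a well-behaved child
            // answers each action with a matching StatusMessage, e.g. "initialize".
            MessageReader reader = new MessageReader()
                    .initialize(child.getInputStream(), "shard-123", mapper, executor);
            Object status = reader.getNextMessageFromSTDOUT().get();
        }
    }

The tests above mock the streams instead of spawning a real child, but the handshake order is the same: a write future resolves once the action is flushed, and the subsequent getNextMessageFromSTDOUT() future resolves (or fails) with the child's status line.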
diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java index b86a64ad..de5a1405 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java @@ -14,83 +14,183 @@ */ package software.amazon.kinesis.multilang; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; - import java.io.ByteArrayInputStream; import java.io.IOException; -import java.util.Properties; -import org.apache.commons.beanutils.BeanUtilsBean; -import org.apache.commons.beanutils.ConvertUtilsBean; -import org.junit.Before; +import junit.framework.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.runners.MockitoJUnitRunner; - -import junit.framework.Assert; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.regions.Region; import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class MultiLangDaemonConfigTest { - private static String FILENAME = "some.properties"; + private static final String FILENAME = "multilang.properties"; + private static final String EXE = "TestExe.exe"; + private static final String APPLICATION_NAME = MultiLangDaemonConfigTest.class.getSimpleName(); + private static final String STREAM_NAME = "fakeStream"; + private static final String STREAM_NAME_IN_ARN = "FAKE_STREAM_NAME"; + private static final Region REGION = Region.US_EAST_1; + private static final String STREAM_ARN = "arn:aws:kinesis:us-east-2:012345678987:stream/" + STREAM_NAME_IN_ARN; + + @Mock + private ClassLoader classLoader; @Mock private AwsCredentialsProvider credentialsProvider; + @Mock private AwsCredentials creds; - @Mock - private KinesisClientLibConfigurator configurator; - @Before - public void setup() { - ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean(); - BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean); - MultiLangDaemonConfiguration multiLangDaemonConfiguration = new MultiLangDaemonConfiguration(utilsBean, - convertUtilsBean); - multiLangDaemonConfiguration.setApplicationName("cool-app"); - multiLangDaemonConfiguration.setStreamName("cool-stream"); - multiLangDaemonConfiguration.setWorkerIdentifier("cool-worker"); - when(credentialsProvider.resolveCredentials()).thenReturn(creds); - when(creds.accessKeyId()).thenReturn("cool-user"); - when(configurator.getConfiguration(any(Properties.class))).thenReturn(multiLangDaemonConfiguration); - } + private final KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator(); + private MultiLangDaemonConfig daemonConfig; - @Test - public void constructorTest() throws IOException { - String PROPERTIES = "executableName = randomEXE \n" + "applicationName = testApp \n" - + "streamName = fakeStream \n" + "AWSCredentialsProvider = 
DefaultAWSCredentialsProviderChain\n" - + "processingLanguage = malbolge"; - ClassLoader classLoader = Mockito.mock(ClassLoader.class); + /** + * Instantiate a MultiLangDaemonConfig object backed by an in-memory properties file. + * @param streamName stream name to write into the properties file; omitted when null + * @param streamArn stream ARN to write into the properties file; omitted when null + * @throws IOException if the properties file cannot be loaded + */ + public void setup(String streamName, String streamArn) throws IOException { + String properties = String.format( + "executableName = %s\n" + + "applicationName = %s\n" + + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + + "processingLanguage = malbolge\n" + + "regionName = %s\n", + EXE, APPLICATION_NAME, "us-east-1"); - Mockito.doReturn(new ByteArrayInputStream(PROPERTIES.getBytes())).when(classLoader) + if (streamName != null) { + properties += String.format("streamName = %s\n", streamName); + } + if (streamArn != null) { + properties += String.format("streamArn = %s\n", streamArn); + } + classLoader = Mockito.mock(ClassLoader.class); + + Mockito.doReturn(new ByteArrayInputStream(properties.getBytes())) + .when(classLoader) .getResourceAsStream(FILENAME); - MultiLangDaemonConfig deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator); + when(credentialsProvider.resolveCredentials()).thenReturn(creds); + when(creds.accessKeyId()).thenReturn("cool-user"); + daemonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator); + } - assertNotNull(deamonConfig.getExecutorService()); - assertNotNull(deamonConfig.getMultiLangDaemonConfiguration()); - assertNotNull(deamonConfig.getRecordProcessorFactory()); + @Test(expected = IllegalArgumentException.class) + public void testConstructorFailsBecauseStreamArnIsInvalid() throws Exception { + setup("", "this_is_not_a_valid_arn"); + } + + @Test(expected = IllegalArgumentException.class) + public void testConstructorFailsBecauseStreamArnIsInvalid2() throws Exception { + setup("", "arn:aws:kinesis:us-east-2:ACCOUNT_ID:BadFormatting:stream/" + STREAM_NAME_IN_ARN); + } + + @Test(expected = IllegalArgumentException.class) + public void testConstructorFailsBecauseStreamNameAndArnAreEmpty() throws Exception { + setup("", ""); + } + + @Test(expected = NullPointerException.class) + public void testConstructorFailsBecauseStreamNameAndArnAreNull() throws Exception { + setup(null, null); + } + + @Test(expected = NullPointerException.class) + public void testConstructorFailsBecauseStreamNameIsNullAndArnIsEmpty() throws Exception { + setup(null, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testConstructorFailsBecauseStreamNameIsEmptyAndArnIsNull() throws Exception { + setup("", null); } @Test - public void propertyValidation() { - String PROPERTIES_NO_EXECUTABLE_NAME = "applicationName = testApp \n" + "streamName = fakeStream \n" + public void testConstructorUsingStreamName() throws IOException { + setup(STREAM_NAME, null); + + assertConfigurationsMatch(STREAM_NAME, null); + } + + @Test + public void testConstructorUsingStreamNameAndStreamArnIsEmpty() throws IOException { + setup(STREAM_NAME, ""); + + assertConfigurationsMatch(STREAM_NAME, ""); + } + + @Test + public void testConstructorUsingStreamNameAndStreamArnIsWhitespace() throws IOException { + setup(STREAM_NAME, " "); + + assertConfigurationsMatch(STREAM_NAME, ""); + } + + @Test + public void testConstructorUsingStreamArn() throws IOException { + setup(null, STREAM_ARN); + + assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN); + } + + @Test + public void testConstructorUsingStreamNameAsEmptyAndStreamArn() throws IOException { + setup("", STREAM_ARN); + + 
assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN); + } + + @Test + public void testConstructorUsingStreamArnOverStreamName() throws IOException { + setup(STREAM_NAME, STREAM_ARN); + + assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN); + } + + /** + * Verify the daemonConfig properties are what we expect them to be. + * + * @param expectedStreamName expected stream name in the parsed configuration + * @param expectedStreamArn expected stream ARN in the parsed configuration + */ + private void assertConfigurationsMatch(String expectedStreamName, String expectedStreamArn) { + final MultiLangDaemonConfiguration multiLangConfiguration = daemonConfig.getMultiLangDaemonConfiguration(); + assertNotNull(daemonConfig.getExecutorService()); + assertNotNull(multiLangConfiguration); + assertNotNull(daemonConfig.getRecordProcessorFactory()); + + assertEquals(EXE, daemonConfig.getRecordProcessorFactory().getCommandArray()[0]); + assertEquals(APPLICATION_NAME, multiLangConfiguration.getApplicationName()); + assertEquals(expectedStreamName, multiLangConfiguration.getStreamName()); + assertEquals(REGION, multiLangConfiguration.getDynamoDbClient().get("region")); + assertEquals(REGION, multiLangConfiguration.getCloudWatchClient().get("region")); + assertEquals(REGION, multiLangConfiguration.getKinesisClient().get("region")); + assertEquals(expectedStreamArn, multiLangConfiguration.getStreamArn()); + } + + @Test + public void testPropertyValidation() { + String propertiesNoExecutableName = "applicationName = testApp \n" + "streamName = fakeStream \n" + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + "processingLanguage = malbolge"; ClassLoader classLoader = Mockito.mock(ClassLoader.class); - Mockito.doReturn(new ByteArrayInputStream(PROPERTIES_NO_EXECUTABLE_NAME.getBytes())).when(classLoader) + Mockito.doReturn(new ByteArrayInputStream(propertiesNoExecutableName.getBytes())) + .when(classLoader) .getResourceAsStream(FILENAME); - MultiLangDaemonConfig config; try { - config = new MultiLangDaemonConfig(FILENAME, classLoader, configurator); + new MultiLangDaemonConfig(FILENAME, classLoader, configurator); Assert.fail("Construction of the config should have failed due to property validation failing."); } catch (IllegalArgumentException e) { // Good @@ -99,4 +199,13 @@ public class MultiLangDaemonConfigTest { } } + /** + * Test the loading of a "real" properties file. This test should catch + * any issues which might arise if there is a discrepancy between reality + * and mocking. 
+ */ + @Test + public void testActualPropertiesFile() throws Exception { + new MultiLangDaemonConfig(FILENAME); + } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java index 0c1d0b60..3e689437 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java @@ -14,10 +14,31 @@ */ package software.amazon.kinesis.multilang; +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; + +import ch.qos.logback.classic.LoggerContext; +import ch.qos.logback.classic.joran.JoranConfigurator; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import org.slf4j.LoggerFactory; +import software.amazon.kinesis.coordinator.Scheduler; +import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.isEmptyOrNullString; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertThat; import static org.mockito.Matchers.anyObject; @@ -28,46 +49,29 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.File; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -import ch.qos.logback.classic.LoggerContext; -import ch.qos.logback.classic.joran.JoranConfigurator; -import software.amazon.kinesis.coordinator.Scheduler; -import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; - @RunWith(MockitoJUnitRunner.class) public class MultiLangDaemonTest { @Mock private Scheduler scheduler; + @Mock private MultiLangDaemonConfig config; + @Mock private ExecutorService executorService; + @Mock private Future futureInteger; + @Mock private MultiLangDaemonConfiguration multiLangDaemonConfiguration; + @Mock private Runtime runtime; @Rule public ExpectedException expectedException = ExpectedException.none(); + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @@ -87,7 +91,7 @@ public class MultiLangDaemonTest { public void testSuccessfulNoOptionsJCommanderBuild() { String testPropertiesFile = "/test/properties/file"; MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments(); - daemon.buildJCommanderAndParseArgs(arguments, new String[] { testPropertiesFile }); + 
daemon.buildJCommanderAndParseArgs(arguments, new String[] {testPropertiesFile}); assertThat(arguments.propertiesFile, nullValue()); assertThat(arguments.logConfiguration, nullValue()); @@ -99,7 +103,7 @@ public class MultiLangDaemonTest { public void testSuccessfulOptionsJCommanderBuild() { String propertiesOption = "/test/properties/file/option"; String propertiesFileArgs = "/test/properties/args"; - String[] args = new String[] { "-p", propertiesOption, propertiesFileArgs }; + String[] args = new String[] {"-p", propertiesOption, propertiesFileArgs}; MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments(); daemon.buildJCommanderAndParseArgs(arguments, args); @@ -125,7 +129,8 @@ public class MultiLangDaemonTest { LoggerContext loggerContext = spy((LoggerContext) LoggerFactory.getILoggerFactory()); JoranConfigurator configurator = spy(new JoranConfigurator()); - String logConfiguration = this.getClass().getClassLoader().getResource("logback.xml").getPath(); + String logConfiguration = + this.getClass().getClassLoader().getResource("logback.xml").getPath(); daemon.configureLogging(logConfiguration, loggerContext, configurator); verify(loggerContext).reset(); diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java index dc6166aa..bed6b6f6 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java @@ -14,6 +14,42 @@ */ package software.amazon.kinesis.multilang; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.google.common.util.concurrent.SettableFuture; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import software.amazon.kinesis.exceptions.InvalidStateException; +import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; +import software.amazon.kinesis.exceptions.ShutdownException; +import software.amazon.kinesis.exceptions.ThrottlingException; +import software.amazon.kinesis.lifecycle.events.InitializationInput; +import software.amazon.kinesis.lifecycle.events.LeaseLostInput; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.lifecycle.events.ShardEndedInput; +import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; +import software.amazon.kinesis.multilang.messages.CheckpointMessage; +import software.amazon.kinesis.multilang.messages.LeaseLostMessage; +import software.amazon.kinesis.multilang.messages.Message; +import software.amazon.kinesis.multilang.messages.ProcessRecordsMessage; +import software.amazon.kinesis.multilang.messages.ShardEndedMessage; +import software.amazon.kinesis.multilang.messages.StatusMessage; +import software.amazon.kinesis.processor.RecordProcessorCheckpointer; +import software.amazon.kinesis.retrieval.KinesisClientRecord; 
+ import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -27,68 +63,35 @@ import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; -import software.amazon.kinesis.multilang.messages.CheckpointMessage; -import software.amazon.kinesis.multilang.messages.LeaseLostMessage; -import software.amazon.kinesis.multilang.messages.Message; -import software.amazon.kinesis.multilang.messages.ProcessRecordsMessage; -import software.amazon.kinesis.multilang.messages.ShardEndedMessage; -import software.amazon.kinesis.multilang.messages.StatusMessage; -import com.google.common.util.concurrent.SettableFuture; - -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - @RunWith(MockitoJUnitRunner.class) public class MultiLangProtocolTest { private static final List EMPTY_RECORD_LIST = Collections.emptyList(); @Mock private MultiLangProtocol protocol; + @Mock private MessageWriter messageWriter; + @Mock private MessageReader messageReader; + private String shardId; + @Mock private RecordProcessorCheckpointer checkpointer; + @Mock private MultiLangDaemonConfiguration configuration; @Before public void setup() { this.shardId = "shard-id-123"; - protocol = new MultiLangProtocolForTesting(messageReader, messageWriter, - InitializationInput.builder().shardId(shardId).build(), configuration); + protocol = new MultiLangProtocolForTesting( + messageReader, + messageWriter, + InitializationInput.builder().shardId(shardId).build(), + configuration); when(configuration.getTimeoutInSeconds()).thenReturn(null); } @@ -106,38 +109,42 @@ public class MultiLangProtocolTest { } @Test - public void initializeTest() throws InterruptedException, ExecutionException { - when(messageWriter - .writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder() - .shardId(shardId).build())))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("initialize"), Message.class)); + public void testInitialize() { + 
when(messageWriter.writeInitializeMessage(argThat(Matchers.withInit( + InitializationInput.builder().shardId(shardId).build())))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("initialize"), Message.class)); assertThat(protocol.initialize(), equalTo(true)); } @Test - public void processRecordsTest() throws InterruptedException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("processRecords"), Message.class)); + public void testProcessRecords() { + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("processRecords"), Message.class)); - assertThat(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()), + assertThat( + protocol.processRecords( + ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()), equalTo(true)); } @Test public void leaseLostTest() { when(messageWriter.writeLeaseLossMessage(any(LeaseLostInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage(LeaseLostMessage.ACTION), Message.class)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage(LeaseLostMessage.ACTION), Message.class)); assertThat(protocol.leaseLost(LeaseLostInput.builder().build()), equalTo(true)); - } @Test public void shardEndedTest() { when(messageWriter.writeShardEndedMessage(any(ShardEndedInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage(ShardEndedMessage.ACTION))); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage(ShardEndedMessage.ACTION))); assertThat(protocol.shardEnded(ShardEndedInput.builder().build()), equalTo(true)); } @@ -145,12 +152,12 @@ public class MultiLangProtocolTest { @Test public void shutdownRequestedTest() { when(messageWriter.writeShutdownRequestedMessage()).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("shutdownRequested"), Message.class)); - Mockito.doReturn(buildFuture(true)).when(messageWriter) - .writeShutdownRequestedMessage(); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("shutdownRequested"), Message.class)); + Mockito.doReturn(buildFuture(true)).when(messageWriter).writeShutdownRequestedMessage(); Mockito.doReturn(buildFuture(new StatusMessage("shutdownRequested"))) - .when(messageReader).getNextMessageFromSTDOUT(); + .when(messageReader) + .getNextMessageFromSTDOUT(); assertThat(protocol.shutdownRequested(null), equalTo(true)); } @@ -172,16 +179,17 @@ public class MultiLangProtocolTest { } return buildFuture(message); } - }.init(messages); } @Test - public void processRecordsWithCheckpointsTest() throws InterruptedException, ExecutionException, - KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + public void testProcessRecordsWithCheckpoints() + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - 
when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(true)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))) + .thenReturn(buildFuture(true)); when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { { this.add(new CheckpointMessage("123", 0L, null)); @@ -196,8 +204,10 @@ public class MultiLangProtocolTest { } })); - boolean result = protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST) - .checkpointer(checkpointer).build()); + boolean result = protocol.processRecords(ProcessRecordsInput.builder() + .records(EMPTY_RECORD_LIST) + .checkpointer(checkpointer) + .build()); assertThat(result, equalTo(true)); @@ -206,42 +216,53 @@ public class MultiLangProtocolTest { } @Test - public void processRecordsWithABadCheckpointTest() throws InterruptedException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(false)); + public void testProcessRecordsWithABadCheckpoint() { + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))) + .thenReturn(buildFuture(false)); when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { { this.add(new CheckpointMessage("456", 0L, null)); this.add(new StatusMessage("processRecords")); } })); - assertThat(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST) - .checkpointer(checkpointer).build()), equalTo(false)); + assertThat( + protocol.processRecords(ProcessRecordsInput.builder() + .records(EMPTY_RECORD_LIST) + .checkpointer(checkpointer) + .build()), + equalTo(false)); } @Test(expected = NullPointerException.class) public void waitForStatusMessageTimeoutTest() throws InterruptedException, TimeoutException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); Future future = Mockito.mock(Future.class); when(messageReader.getNextMessageFromSTDOUT()).thenReturn(future); when(configuration.getTimeoutInSeconds()).thenReturn(5); when(future.get(anyInt(), eq(TimeUnit.SECONDS))).thenThrow(TimeoutException.class); - protocol = new MultiLangProtocolForTesting(messageReader, + protocol = new MultiLangProtocolForTesting( + messageReader, messageWriter, InitializationInput.builder().shardId(shardId).build(), configuration); - protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()); + protocol.processRecords( + ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()); } @Test public void waitForStatusMessageSuccessTest() { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new 
StatusMessage("processRecords"), Message.class)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("processRecords"), Message.class)); when(configuration.getTimeoutInSeconds()).thenReturn(5); - assertTrue(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build())); + assertTrue(protocol.processRecords( + ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build())); } private class MultiLangProtocolForTesting extends MultiLangProtocol { @@ -253,10 +274,11 @@ public class MultiLangProtocolTest { * @param initializationInput * @param configuration */ - MultiLangProtocolForTesting(final MessageReader messageReader, - final MessageWriter messageWriter, - final InitializationInput initializationInput, - final MultiLangDaemonConfiguration configuration) { + MultiLangProtocolForTesting( + final MessageReader messageReader, + final MessageWriter messageWriter, + final InitializationInput initializationInput, + final MultiLangDaemonConfiguration configuration) { super(messageReader, messageWriter, initializationInput, configuration); } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java new file mode 100644 index 00000000..fbffee81 --- /dev/null +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java @@ -0,0 +1,110 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package software.amazon.kinesis.multilang; + +import com.amazonaws.regions.Regions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT; +import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT_REGION; +import static software.amazon.kinesis.multilang.NestedPropertyKey.EXTERNAL_ID; +import static software.amazon.kinesis.multilang.NestedPropertyKey.parse; + +@RunWith(MockitoJUnitRunner.class) +public class NestedPropertyKeyTest { + + @Mock + private NestedPropertyProcessor mockProcessor; + + @Test + public void testExternalId() { + final String expectedId = "eid"; + + parse(mockProcessor, createKey(EXTERNAL_ID, expectedId)); + verify(mockProcessor).acceptExternalId(expectedId); + } + + @Test + public void testEndpoint() { + final String expectedEndpoint = "https://sts.us-east-1.amazonaws.com"; + final String expectedRegion = "us-east-1"; + final String param = createKey(ENDPOINT, expectedEndpoint + "^" + expectedRegion); + + parse(mockProcessor, param); + verify(mockProcessor).acceptEndpoint(expectedEndpoint, expectedRegion); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidEndpoint() { + parse(mockProcessor, createKey(ENDPOINT, "value-sans-caret-delimiter")); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidEndpointDoubleCaret() { + parse(mockProcessor, createKey(ENDPOINT, "https://sts.us-east-1.amazonaws.com^us-east-1^borkbork")); + } + + @Test + public void testEndpointRegion() { + final Regions expectedRegion = Regions.GovCloud; + + parse(mockProcessor, createKey(ENDPOINT_REGION, expectedRegion.getName())); + verify(mockProcessor).acceptEndpointRegion(expectedRegion); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidEndpointRegion() { + parse(mockProcessor, createKey(ENDPOINT_REGION, "snuffleupagus")); + } + + /** + * Test that the literal nested key (i.e., {@code key=} in {@code some_val|key=nested_val}) + * does not change. Any change to an existing literal key is not backwards-compatible. + */ + @Test + public void testKeysExplicitly() { + // Adding a new enum will deliberately cause this assert to fail, and + // therefore raise awareness for this explicit test. Add-and-remove may + // keep the number unchanged yet will also break (by removing an enum). 
+ assertEquals(3, NestedPropertyKey.values().length); + + assertEquals("endpoint", ENDPOINT.getNestedKey()); + assertEquals("endpointRegion", ENDPOINT_REGION.getNestedKey()); + assertEquals("externalId", EXTERNAL_ID.getNestedKey()); + } + + @Test + public void testNonmatchingParameters() { + final String[] params = new String[] { + null, + "", + "hello world", // no nested key + "foo=bar", // nested key, but is not a recognized key + createKey(EXTERNAL_ID, "eid") + "=extra", // valid key made invalid by second '=' + }; + parse(mockProcessor, params); + verifyZeroInteractions(mockProcessor); + } + + private static String createKey(final NestedPropertyKey key, final String value) { + return key.getNestedKey() + "=" + value; + } +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java index b3bb0719..9876fd21 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java @@ -27,12 +27,10 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import software.amazon.kinesis.multilang.DrainChildSTDERRTask; -import software.amazon.kinesis.multilang.LineReaderTask; public class ReadSTDERRTaskTest { - private static final String shardId = "shard-123"; + private static final String SHARD_ID = "shard-123"; private BufferedReader mockBufferReader; @Before @@ -45,7 +43,7 @@ public class ReadSTDERRTaskTest { String errorMessages = "OMG\nThis is test message\n blah blah blah \n"; InputStream stream = new ByteArrayInputStream(errorMessages.getBytes()); - LineReaderTask reader = new DrainChildSTDERRTask().initialize(stream, shardId, ""); + LineReaderTask reader = new DrainChildSTDERRTask().initialize(stream, SHARD_ID, ""); Assert.assertNotNull(reader); } @@ -54,7 +52,7 @@ public class ReadSTDERRTaskTest { String errorMessages = "OMG\nThis is test message\n blah blah blah \n"; BufferedReader bufferReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(errorMessages.getBytes()))); - LineReaderTask errorReader = new DrainChildSTDERRTask().initialize(bufferReader, shardId, ""); + LineReaderTask errorReader = new DrainChildSTDERRTask().initialize(bufferReader, SHARD_ID, ""); Assert.assertNotNull(errorReader); Boolean result = errorReader.call(); @@ -67,14 +65,15 @@ public class ReadSTDERRTaskTest { } catch (IOException e) { Assert.fail("Not supposed to get an exception when we're just building our mock."); } - LineReaderTask errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, shardId, ""); + LineReaderTask errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, SHARD_ID, ""); Assert.assertNotNull(errorReader); Future result = Executors.newCachedThreadPool().submit(errorReader); Boolean finishedCleanly = null; try { finishedCleanly = result.get(); } catch (InterruptedException | ExecutionException e) { - Assert.fail("Should have been able to get a result. The error should be handled during the call and result in false."); + Assert.fail( + "Should have been able to get a result. 
The error should be handled during the call and result in false."); } Assert.assertFalse("Reading a line should have thrown an exception", finishedCleanly); } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java index 7a7d7b11..d1f67ad8 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java @@ -14,17 +14,13 @@ */ package software.amazon.kinesis.multilang; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; import org.junit.Assert; import org.junit.Test; - -import software.amazon.kinesis.multilang.MultiLangRecordProcessorFactory; -import software.amazon.kinesis.multilang.MultiLangShardRecordProcessor; -import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; -import software.amazon.kinesis.processor.ShardRecordProcessor; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; +import software.amazon.kinesis.processor.ShardRecordProcessor; @RunWith(MockitoJUnitRunner.class) public class StreamingShardRecordProcessorFactoryTest { @@ -34,10 +30,13 @@ public class StreamingShardRecordProcessorFactoryTest { @Test public void createProcessorTest() { - MultiLangRecordProcessorFactory factory = new MultiLangRecordProcessorFactory("somecommand", null, configuration); + MultiLangRecordProcessorFactory factory = + new MultiLangRecordProcessorFactory("somecommand", null, configuration); ShardRecordProcessor processor = factory.shardRecordProcessor(); - Assert.assertEquals("Should have constructed a StreamingRecordProcessor", MultiLangShardRecordProcessor.class, + Assert.assertEquals( + "Should have constructed a MultiLangShardRecordProcessor", + MultiLangShardRecordProcessor.class, processor.getClass()); } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java index e3368e07..4eb66db1 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java @@ -14,16 +14,6 @@ */ package software.amazon.kinesis.multilang; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.argThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.Collections; @@ -33,6 +23,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ 
-42,13 +33,8 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import org.mockito.stubbing.Answer; - -import com.fasterxml.jackson.databind.ObjectMapper; - import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.exceptions.InvalidStateException; import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; import software.amazon.kinesis.exceptions.ThrottlingException; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; @@ -64,92 +50,98 @@ import software.amazon.kinesis.processor.PreparedCheckpointer; import software.amazon.kinesis.processor.RecordProcessorCheckpointer; import software.amazon.kinesis.retrieval.KinesisClientRecord; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class StreamingShardRecordProcessorTest { - private static final String shardId = "shard-123"; + private static final String SHARD_ID = "shard-123"; private int systemExitCount = 0; @Mock private Future messageFuture; + @Mock private Future trueFuture; private RecordProcessorCheckpointer unimplementedCheckpointer = new RecordProcessorCheckpointer() { @Override - public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, - ThrottlingException, ShutdownException { + public void checkpoint() throws KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @Override - public void checkpoint(String sequenceNumber) throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + public void checkpoint(String sequenceNumber) + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @Override - public void checkpoint(Record record) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException { + public void checkpoint(Record record) throws KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @Override public void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @Override public PreparedCheckpointer prepareCheckpoint() - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException { + throws KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @Override - public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) + throws 
KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @Override public PreparedCheckpointer prepareCheckpoint(Record record) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException { + throws KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @Override - public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) + throws KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @Override public PreparedCheckpointer prepareCheckpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { return null; } @Override public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + public PreparedCheckpointer prepareCheckpoint( + String sequenceNumber, long subSequenceNumber, byte[] applicationState) + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @@ -171,7 +163,7 @@ public class StreamingShardRecordProcessorTest { private MultiLangDaemonConfiguration configuration; @Before - public void prepare() throws IOException, InterruptedException, ExecutionException { + public void prepare() throws InterruptedException, ExecutionException { // Fake command systemExitCount = 0; @@ -185,8 +177,14 @@ public class StreamingShardRecordProcessorTest { when(configuration.getTimeoutInSeconds()).thenReturn(null); recordProcessor = - new MultiLangShardRecordProcessor(new ProcessBuilder(), executor, new ObjectMapper(), messageWriter, - messageReader, errorReader, configuration) { + new MultiLangShardRecordProcessor( + new ProcessBuilder(), + executor, + new ObjectMapper(), + messageWriter, + messageReader, + errorReader, + configuration) { // Just don't do anything when we exit. 
void exit() { @@ -210,9 +208,12 @@ public class StreamingShardRecordProcessorTest { Mockito.doReturn(Mockito.mock(Future.class)).when(messageReader).drainSTDOUT(); Mockito.doReturn(true).when(trueFuture).get(); - when(messageWriter.writeInitializeMessage(any(InitializationInput.class))).thenReturn(trueFuture); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(trueFuture); - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(trueFuture); + when(messageWriter.writeInitializeMessage(any(InitializationInput.class))) + .thenReturn(trueFuture); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))) + .thenReturn(trueFuture); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(trueFuture); when(messageWriter.writeLeaseLossMessage(any(LeaseLostInput.class))).thenReturn(trueFuture); } @@ -230,22 +231,29 @@ public class StreamingShardRecordProcessorTest { List testRecords = Collections.emptyList(); - recordProcessor.initialize(InitializationInput.builder().shardId(shardId).build()); - recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords) - .checkpointer(unimplementedCheckpointer).build()); - recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords) - .checkpointer(unimplementedCheckpointer).build()); + recordProcessor.initialize( + InitializationInput.builder().shardId(SHARD_ID).build()); + recordProcessor.processRecords(ProcessRecordsInput.builder() + .records(testRecords) + .checkpointer(unimplementedCheckpointer) + .build()); + recordProcessor.processRecords(ProcessRecordsInput.builder() + .records(testRecords) + .checkpointer(unimplementedCheckpointer) + .build()); recordProcessor.leaseLost(LeaseLostInput.builder().build()); } @Test public void processorPhasesTest() throws InterruptedException, ExecutionException { - Answer answer = new Answer() { - StatusMessage[] answers = new StatusMessage[] { new StatusMessage(InitializeMessage.ACTION), - new StatusMessage(ProcessRecordsMessage.ACTION), new StatusMessage(ProcessRecordsMessage.ACTION), - new StatusMessage(ShutdownMessage.ACTION) }; + StatusMessage[] answers = new StatusMessage[] { + new StatusMessage(InitializeMessage.ACTION), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ShutdownMessage.ACTION) + }; int callCount = 0; @@ -263,7 +271,7 @@ public class StreamingShardRecordProcessorTest { verify(messageWriter) .writeInitializeMessage(argThat(Matchers.withInit( - InitializationInput.builder().shardId(shardId).build()))); + InitializationInput.builder().shardId(SHARD_ID).build()))); verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class)); verify(messageWriter).writeLeaseLossMessage(any(LeaseLostInput.class)); } @@ -276,9 +284,12 @@ public class StreamingShardRecordProcessorTest { * This bad message will cause shutdown to not attempt to send a message. i.e. avoid encountering an * exception. 
*/ - StatusMessage[] answers = new StatusMessage[] { new StatusMessage("Bad"), - new StatusMessage(ProcessRecordsMessage.ACTION), new StatusMessage(ProcessRecordsMessage.ACTION), - new StatusMessage(ShutdownMessage.ACTION) }; + StatusMessage[] answers = new StatusMessage[] { + new StatusMessage("Bad"), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ShutdownMessage.ACTION) + }; int callCount = 0; @@ -294,8 +305,9 @@ public class StreamingShardRecordProcessorTest { phases(answer); - verify(messageWriter).writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder() - .shardId(shardId).build()))); + verify(messageWriter) + .writeInitializeMessage(argThat(Matchers.withInit( + InitializationInput.builder().shardId(SHARD_ID).build()))); verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class)); verify(messageWriter, never()).writeLeaseLossMessage(any(LeaseLostInput.class)); Assert.assertEquals(1, systemExitCount); diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java new file mode 100644 index 00000000..c27a425d --- /dev/null +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package software.amazon.kinesis.multilang.auth; + +import java.util.Arrays; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class KclSTSAssumeRoleSessionCredentialsProviderTest { + + private static final String ARN = "arn"; + private static final String SESSION_NAME = "sessionName"; + + /** + * Test that the constructor doesn't throw an out-of-bounds exception if + * there are no parameters beyond the required ARN and session name. 
+ */ + @Test + public void testConstructorWithoutOptionalParams() { + new KclSTSAssumeRoleSessionCredentialsProvider(new String[] {ARN, SESSION_NAME}); + } + + @Test + public void testAcceptEndpoint() { + // discovered exception during e2e testing; therefore, this test is + // to simply verify the constructed STS client doesn't go *boom* + final KclSTSAssumeRoleSessionCredentialsProvider provider = + new KclSTSAssumeRoleSessionCredentialsProvider(ARN, SESSION_NAME); + provider.acceptEndpoint("endpoint", "us-east-1"); + } + + @Test + public void testVarArgs() { + for (final String[] varargs : Arrays.asList( + new String[] {ARN, SESSION_NAME, "externalId=eid", "foo"}, + new String[] {ARN, SESSION_NAME, "foo", "externalId=eid"})) { + final VarArgsSpy provider = new VarArgsSpy(varargs); + assertEquals("eid", provider.externalId); + } + } + + private static class VarArgsSpy extends KclSTSAssumeRoleSessionCredentialsProvider { + + private String externalId; + + public VarArgsSpy(String[] args) { + super(args); + } + + @Override + public void acceptExternalId(final String externalId) { + this.externalId = externalId; + super.acceptExternalId(externalId); + } + } +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java index 8da22d53..ba5a0925 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java @@ -14,37 +14,33 @@ */ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; - import java.util.Arrays; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSCredentialsProviderChain; import com.amazonaws.auth.BasicAWSCredentials; import lombok.ToString; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import org.junit.Test; +import software.amazon.kinesis.multilang.auth.KclSTSAssumeRoleSessionCredentialsProvider; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AWSCredentialsProviderChain; - -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; public class AWSCredentialsProviderPropertyValueDecoderTest { private static final String TEST_ACCESS_KEY_ID = "123"; private static final String TEST_SECRET_KEY = "456"; - private String credentialName1 = "software.amazon.kinesis.multilang.config.AWSCredentialsProviderPropertyValueDecoderTest$AlwaysSucceedCredentialsProvider"; - private String 
credentialName2 = "software.amazon.kinesis.multilang.config.AWSCredentialsProviderPropertyValueDecoderTest$ConstructorCredentialsProvider"; - private AWSCredentialsProviderPropertyValueDecoder decoder = new AWSCredentialsProviderPropertyValueDecoder(); + private final String credentialName1 = AlwaysSucceedCredentialsProvider.class.getName(); + private final String credentialName2 = ConstructorCredentialsProvider.class.getName(); + private final AWSCredentialsProviderPropertyValueDecoder decoder = new AWSCredentialsProviderPropertyValueDecoder(); @ToString private static class AWSCredentialsMatcher extends TypeSafeDiagnosingMatcher { @@ -59,10 +55,6 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { this.classMatcher = instanceOf(AWSCredentialsProviderChain.class); } - private AWSCredentialsMatcher(AWSCredentials expected) { - this(expected.getAWSAccessKeyId(), expected.getAWSSecretKey()); - } - @Override protected boolean matchesSafely(AWSCredentialsProvider item, Description mismatchDescription) { AWSCredentials actual = item.getCredentials(); @@ -86,10 +78,10 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { @Override public void describeTo(Description description) { - description.appendText("An AWSCredentialsProvider that provides an AWSCredential matching: ") + description + .appendText("An AWSCredentialsProvider that provides an AWSCredential matching: ") .appendList("(", ", ", ")", Arrays.asList(classMatcher, akidMatcher, secretMatcher)); } - } private static AWSCredentialsMatcher hasCredentials(String akid, String secret) { @@ -120,6 +112,33 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { assertThat(provider, hasCredentials("arg1", "arg2")); } + /** + * Test that providers in the multi-lang auth package can be resolved and instantiated. + */ + @Test + public void testKclAuthProvider() { + for (final String className : Arrays.asList( + KclSTSAssumeRoleSessionCredentialsProvider.class.getName(), // fully-qualified name + KclSTSAssumeRoleSessionCredentialsProvider.class.getSimpleName() // name-only; needs prefix + )) { + final AWSCredentialsProvider provider = decoder.decodeValue(className + "|arn|sessionName"); + assertNotNull(className, provider); + } + } + + /** + * Test that a provider can be instantiated by its varargs constructor. 
+ */ + @Test + public void testVarArgAuthProvider() { + final String[] args = new String[] {"arg1", "arg2", "arg3"}; + final String className = VarArgCredentialsProvider.class.getName(); + final String encodedValue = className + "|" + String.join("|", args); + + final AWSCredentialsProvider provider = decoder.decodeValue(encodedValue); + assertEquals(Arrays.toString(args), provider.getCredentials().getAWSAccessKeyId()); + } + /** * This credentials provider will always succeed */ @@ -131,9 +150,7 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -144,9 +161,9 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { private String arg1; private String arg2; + @SuppressWarnings("unused") public ConstructorCredentialsProvider(String arg1) { - this.arg1 = arg1; - this.arg2 = "blank"; + this(arg1, "blank"); } public ConstructorCredentialsProvider(String arg1, String arg2) { @@ -160,8 +177,25 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { } @Override - public void refresh() { + public void refresh() {} + } + private static class VarArgCredentialsProvider implements AWSCredentialsProvider { + + private final String[] args; + + public VarArgCredentialsProvider(final String[] args) { + this.args = args; } + + @Override + public AWSCredentials getCredentials() { + // KISS solution to surface the constructor args + final String flattenedArgs = Arrays.toString(args); + return new BasicAWSCredentials(flattenedArgs, flattenedArgs); + } + + @Override + public void refresh() {} } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java index 9038453a..ac687b82 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java @@ -15,16 +15,14 @@ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - import java.util.function.Consumer; import java.util.function.Supplier; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.RequiredArgsConstructor; +import lombok.ToString; +import lombok.experimental.Accessors; import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; import org.junit.Before; @@ -32,11 +30,12 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.RequiredArgsConstructor; -import lombok.ToString; -import lombok.experimental.Accessors; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; public class BuilderDynaBeanTest { @@ -109,8 +108,8 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateAllParameters() 
throws Exception { - TestComplexCreate expected = TestComplexCreate.create("real", - TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); + TestComplexCreate expected = TestComplexCreate.create( + "real", TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "[0]", expected.realName); @@ -136,8 +135,8 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateComplexParameterOnly() throws Exception { - TestComplexCreate expected = TestComplexCreate.create(null, - TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); + TestComplexCreate expected = TestComplexCreate.create( + null, TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "[1].stringL1", expected.test1.stringL1); @@ -161,7 +160,8 @@ public class BuilderDynaBeanTest { @Test public void testSimpleBuilderAllParameters() throws Exception { - TestSimpleBuilder expected = TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build(); + TestSimpleBuilder expected = + TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1); @@ -213,12 +213,14 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateSimpleBuilderVariantAllParameters() throws Exception { - TestSimpleBuilder variant = TestSimpleBuilder.builder().longVal(10L).stringL1("variant").build(); + TestSimpleBuilder variant = + TestSimpleBuilder.builder().longVal(10L).stringL1("variant").build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("simple-builder", variant); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName); - utilsBean.setProperty(builderDynaBean, "[1].class", expected.variant.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "[1].class", expected.variant.getClass().getName()); utilsBean.setProperty(builderDynaBean, "[1].longVal", variant.longVal); utilsBean.setProperty(builderDynaBean, "[1].stringL1", variant.stringL1); @@ -229,8 +231,11 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateVariantBuilderAllParameters() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().variantBuilderName("variant-build").intClass(20) - .testEnum(TestEnum.Blue).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .variantBuilderName("variant-build") + .intClass(20) + .testEnum(TestEnum.Blue) + .build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant", variant); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean); @@ -264,13 +269,16 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateVariantBuilderAllParametersPrefixWithJoiner() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().variantBuilderName("variant-build").intClass(20) - .testEnum(TestEnum.Blue).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .variantBuilderName("variant-build") + .intClass(20) + 
.testEnum(TestEnum.Blue) + .build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", variant); String prefix = variant.getClass().getEnclosingClass().getName() + "$"; - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, - prefix); + BuilderDynaBean builderDynaBean = + new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, prefix); utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName); utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getSimpleName()); utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName); @@ -284,13 +292,16 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateVariantBuilderAllParametersPrefixWithOutJoiner() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().variantBuilderName("variant-build").intClass(20) - .testEnum(TestEnum.Blue).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .variantBuilderName("variant-build") + .intClass(20) + .testEnum(TestEnum.Blue) + .build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", variant); String prefix = variant.getClass().getEnclosingClass().getName(); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, - prefix); + BuilderDynaBean builderDynaBean = + new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, prefix); utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName); utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getSimpleName()); utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName); @@ -330,11 +341,21 @@ public class BuilderDynaBeanTest { @Test public void testComplexRootAllParameters() throws Exception { - TestSimpleBuilder simpleBuilder = TestSimpleBuilder.builder().stringL1("simple-l1").longVal(20L).build(); - TestRootClass expected = TestRootClass.builder().intVal(10).stringVal("root").testEnum(TestEnum.Red) - .testComplexCreate(TestComplexCreate.create("real", - TestSimpleBuilder.builder().stringL1("complex-l1").longVal(10L).build())) - .testSimpleBuilder(simpleBuilder).testSimpleCreate(TestSimpleCreate.create("first", "last")).build(); + TestSimpleBuilder simpleBuilder = + TestSimpleBuilder.builder().stringL1("simple-l1").longVal(20L).build(); + TestRootClass expected = TestRootClass.builder() + .intVal(10) + .stringVal("root") + .testEnum(TestEnum.Red) + .testComplexCreate(TestComplexCreate.create( + "real", + TestSimpleBuilder.builder() + .stringL1("complex-l1") + .longVal(10L) + .build())) + .testSimpleBuilder(simpleBuilder) + .testSimpleCreate(TestSimpleCreate.create("first", "last")) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean); @@ -342,10 +363,10 @@ public class BuilderDynaBeanTest { utilsBean.setProperty(builderDynaBean, "stringVal", expected.stringVal); utilsBean.setProperty(builderDynaBean, "testEnum", expected.testEnum); utilsBean.setProperty(builderDynaBean, "testComplexCreate.[0]", expected.testComplexCreate.realName); - utilsBean.setProperty(builderDynaBean, "testComplexCreate.[1].stringL1", - expected.testComplexCreate.test1.stringL1); - utilsBean.setProperty(builderDynaBean, "testComplexCreate.[1].longVal", - expected.testComplexCreate.test1.longVal); + utilsBean.setProperty( + builderDynaBean, 
"testComplexCreate.[1].stringL1", expected.testComplexCreate.test1.stringL1); + utilsBean.setProperty( + builderDynaBean, "testComplexCreate.[1].longVal", expected.testComplexCreate.test1.longVal); utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.class", TestSimpleBuilder.class.getName()); utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.stringL1", simpleBuilder.stringL1); utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.longVal", simpleBuilder.longVal); @@ -370,7 +391,11 @@ public class BuilderDynaBeanTest { @Test public void testComplexRootTopLevelOnly() throws Exception { - TestRootClass expected = TestRootClass.builder().intVal(10).stringVal("root").testEnum(TestEnum.Red).build(); + TestRootClass expected = TestRootClass.builder() + .intVal(10) + .stringVal("root") + .testEnum(TestEnum.Red) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean); @@ -385,12 +410,17 @@ public class BuilderDynaBeanTest { @Test public void testSupplierNotUsed() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().testEnum(TestEnum.Green).intClass(10) - .variantBuilderName("variant-supplier").build(); - TestSupplierClass expected = TestSupplierClass.builder().variantClass(variant).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .testEnum(TestEnum.Green) + .intClass(10) + .variantBuilderName("variant-supplier") + .build(); + TestSupplierClass expected = + TestSupplierClass.builder().variantClass(variant).build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSupplierClass.class, convertUtilsBean); - utilsBean.setProperty(builderDynaBean, "variantClass.class", variant.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "variantClass.class", variant.getClass().getName()); utilsBean.setProperty(builderDynaBean, "variantClass.testEnum", variant.testEnum); utilsBean.setProperty(builderDynaBean, "variantClass.intClass", variant.intClass); utilsBean.setProperty(builderDynaBean, "variantClass.variantBuilderName", variant.variantBuilderName); @@ -422,8 +452,11 @@ public class BuilderDynaBeanTest { @Test public void testVariantBuildsToSuperType() throws Exception { - TestVariantBuilder expected = TestVariantBuilder.builder().intClass(10).testEnum(TestEnum.Green) - .variantBuilderName("variant-super").build(); + TestVariantBuilder expected = TestVariantBuilder.builder() + .intClass(10) + .testEnum(TestEnum.Green) + .variantBuilderName("variant-super") + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "class", expected.getClass().getName()); @@ -439,9 +472,11 @@ public class BuilderDynaBeanTest { @Test public void testEmptyPropertyHandler() throws Exception { String emptyPropertyValue = "test-property"; - TestVariantCreate expected = TestVariantCreate.create(emptyPropertyValue, (long) emptyPropertyValue.length(), - emptyPropertyValue + "-vary"); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean, + TestVariantCreate expected = TestVariantCreate.create( + emptyPropertyValue, (long) emptyPropertyValue.length(), emptyPropertyValue + "-vary"); + BuilderDynaBean builderDynaBean = new BuilderDynaBean( + TestInterface.class, + convertUtilsBean, s -> TestVariantCreate.create(s, (long) s.length(), s + "-vary")); utilsBean.setProperty(builderDynaBean, "", emptyPropertyValue); @@ -455,8 +490,8 @@ public class BuilderDynaBeanTest { 
thrown.expect(IllegalStateException.class); thrown.expectMessage(containsString("When a property handler is resolved further properties may not be set.")); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean, - s -> TestVariantCreate.create("test", 10, "test")); + BuilderDynaBean builderDynaBean = new BuilderDynaBean( + TestInterface.class, convertUtilsBean, s -> TestVariantCreate.create("test", 10, "test")); utilsBean.setProperty(builderDynaBean, "", "test"); utilsBean.setProperty(builderDynaBean, "[0]", "test"); } @@ -468,8 +503,8 @@ public class BuilderDynaBeanTest { thrown.expectMessage(containsString(TestInterface.class.getName())); thrown.expectMessage(containsString("cannot be assigned to")); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean, - s -> TestEnum.Green); + BuilderDynaBean builderDynaBean = + new BuilderDynaBean(TestInterface.class, convertUtilsBean, s -> TestEnum.Green); utilsBean.setProperty(builderDynaBean, "", "test"); @@ -478,8 +513,11 @@ public class BuilderDynaBeanTest { @Test public void testSimpleArrayValues() throws Exception { - SimpleArrayClassVariant expected = SimpleArrayClassVariant.builder().ints(new Integer[] { 1, 2, 3 }) - .variantName("simple-array").longs(new Long[] { 1L, 2L, 3L }).strings(new String[] { "a", "b", "c" }) + SimpleArrayClassVariant expected = SimpleArrayClassVariant.builder() + .ints(new Integer[] {1, 2, 3}) + .variantName("simple-array") + .longs(new Long[] {1L, 2L, 3L}) + .strings(new String[] {"a", "b", "c"}) .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(SimpleArrayClassVariant.class, convertUtilsBean); @@ -503,12 +541,20 @@ public class BuilderDynaBeanTest { @Test public void testComplexArrayValuesBuilder() throws Exception { - TestVariantBuilder variant1 = TestVariantBuilder.builder().variantBuilderName("variant-1") - .testEnum(TestEnum.Green).intClass(10).build(); - TestVariantBuilder variant2 = TestVariantBuilder.builder().variantBuilderName("variant-2") - .testEnum(TestEnum.Blue).intClass(20).build(); - ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder().variantName("complex-test") - .tests(new TestInterface[] { variant1, variant2 }).build(); + TestVariantBuilder variant1 = TestVariantBuilder.builder() + .variantBuilderName("variant-1") + .testEnum(TestEnum.Green) + .intClass(10) + .build(); + TestVariantBuilder variant2 = TestVariantBuilder.builder() + .variantBuilderName("variant-2") + .testEnum(TestEnum.Blue) + .intClass(20) + .build(); + ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder() + .variantName("complex-test") + .tests(new TestInterface[] {variant1, variant2}) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean); @@ -533,18 +579,22 @@ public class BuilderDynaBeanTest { TestVariantCreate variant1 = TestVariantCreate.create("variant-1", 10L, "vary-1"); TestVariantCreate variant2 = TestVariantCreate.create("variant-2", 20L, "vary-2"); - ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder().variantName("create-test") - .tests(new TestInterface[] { variant1, variant2 }).build(); + ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder() + .variantName("create-test") + .tests(new TestInterface[] {variant1, variant2}) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "variantName", 
expected.variantName); - utilsBean.setProperty(builderDynaBean, "tests[0].class", variant1.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "tests[0].class", variant1.getClass().getName()); utilsBean.setProperty(builderDynaBean, "tests[0].[0]", variant1.variantCreateName); utilsBean.setProperty(builderDynaBean, "tests[0].[1]", variant1.longClass); utilsBean.setProperty(builderDynaBean, "tests[0].[2]", variant1.varyString); - utilsBean.setProperty(builderDynaBean, "tests[1].class", variant2.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "tests[1].class", variant2.getClass().getName()); utilsBean.setProperty(builderDynaBean, "tests[1].[0]", variant2.variantCreateName); utilsBean.setProperty(builderDynaBean, "tests[1].[1]", variant2.longClass); utilsBean.setProperty(builderDynaBean, "tests[1].[2]", variant2.varyString); @@ -552,7 +602,6 @@ public class BuilderDynaBeanTest { ComplexArrayClassVariant actual = builderDynaBean.build(ComplexArrayClassVariant.class); assertThat(actual, equalTo(expected)); - } @Test @@ -562,13 +611,18 @@ public class BuilderDynaBeanTest { if (i % 2 == 0) { variants[i] = TestVariantCreate.create("create-variant-" + i, i + 5, "vary-" + i); } else { - variants[i] = TestVariantBuilder.builder().testEnum(TestEnum.values()[i % TestEnum.values().length]) - .intClass(i).variantBuilderName("builder-variant-" + i).build(); + variants[i] = TestVariantBuilder.builder() + .testEnum(TestEnum.values()[i % TestEnum.values().length]) + .intClass(i) + .variantBuilderName("builder-variant-" + i) + .build(); } } - ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder().variantName("large-complex") - .tests(variants).build(); + ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder() + .variantName("large-complex") + .tests(variants) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean); @@ -578,13 +632,15 @@ public class BuilderDynaBeanTest { TestInterface variant = variants[i]; if (variant instanceof TestVariantCreate) { TestVariantCreate create = (TestVariantCreate) variant; - utilsBean.setProperty(builderDynaBean, prefix + "class", create.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, prefix + "class", create.getClass().getName()); utilsBean.setProperty(builderDynaBean, prefix + "[0]", create.variantCreateName); utilsBean.setProperty(builderDynaBean, prefix + "[1]", create.longClass); utilsBean.setProperty(builderDynaBean, prefix + "[2]", create.varyString); } else if (variant instanceof TestVariantBuilder) { TestVariantBuilder builder = (TestVariantBuilder) variant; - utilsBean.setProperty(builderDynaBean, prefix + "class", builder.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, prefix + "class", builder.getClass().getName()); utilsBean.setProperty(builderDynaBean, prefix + "variantBuilderName", builder.variantBuilderName); utilsBean.setProperty(builderDynaBean, prefix + "intClass", builder.intClass); utilsBean.setProperty(builderDynaBean, prefix + "testEnum", builder.testEnum); @@ -667,25 +723,27 @@ public class BuilderDynaBeanTest { @Test public void testAdditionalMutators() throws Exception { - TestSimpleBuilder expected = TestSimpleBuilder.builder().stringL1("test").longVal(10L).build(); + TestSimpleBuilder expected = + TestSimpleBuilder.builder().stringL1("test").longVal(10L).build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean); 
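For orientation amid this reformat-only hunk: the pattern these tests exercise is unchanged. A BuilderDynaBean wraps a target type, properties are applied through BeanUtilsBean.setProperty (including indexed constructor arguments such as "[0]" and nested paths such as "tests[1].stringL1"), and build(...) materializes the object, optionally applying extra mutators as testAdditionalMutators does. A minimal sketch of that flow, using only the constructors and build(...) overloads visible in this diff; SimpleTarget is a hypothetical stand-in for the test's TestSimpleBuilder:

    import lombok.Builder;
    import org.apache.commons.beanutils.BeanUtilsBean;
    import org.apache.commons.beanutils.ConvertUtilsBean;
    import software.amazon.kinesis.multilang.config.BuilderDynaBean;

    public class BuilderDynaBeanSketch {

        @Builder
        public static class SimpleTarget { // hypothetical; mirrors the test's TestSimpleBuilder
            private final String stringL1;
            private final long longVal;
        }

        public static void main(String[] args) throws Exception {
            ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
            BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean);

            // Properties recorded on the DynaBean are applied to the Lombok builder
            // when build(...) is invoked; string values are converted to the
            // builder's parameter types by the supplied ConvertUtilsBean.
            BuilderDynaBean dynaBean = new BuilderDynaBean(SimpleTarget.class, convertUtilsBean);
            utilsBean.setProperty(dynaBean, "stringL1", "l1");
            utilsBean.setProperty(dynaBean, "longVal", "10");

            SimpleTarget target = dynaBean.build(SimpleTarget.class);
        }
    }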
utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1); - TestSimpleBuilder actual = builderDynaBean.build(TestSimpleBuilder.class, - b -> ((TestSimpleBuilder.TestSimpleBuilderBuilder) b).longVal(expected.longVal)); + TestSimpleBuilder actual = + builderDynaBean.build(TestSimpleBuilder.class, b -> ((TestSimpleBuilder.TestSimpleBuilderBuilder) b) + .longVal(expected.longVal)); assertThat(actual, equalTo(expected)); } public enum TestEnum { - Red, Green, Blue + Red, + Green, + Blue } - public interface TestInterface { - - } + public interface TestInterface {} @Accessors(fluent = true) @ToString @@ -838,7 +896,5 @@ public class BuilderDynaBeanTest { } public String name = "default"; - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java index 96de848e..5e0db340 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java @@ -15,18 +15,17 @@ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; - import java.util.Optional; -import org.junit.Test; - import lombok.Builder; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.Setter; import lombok.experimental.Accessors; +import org.junit.Test; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; public class ConfigurationSettableUtilsTest { @@ -44,7 +43,10 @@ public class ConfigurationSettableUtilsTest { public void testPrimitivesSet() { ConfigResult expected = ConfigResult.builder().rawInt(10).rawLong(15L).build(); - ConfigObject configObject = ConfigObject.builder().rawInt(expected.rawInt).rawLong(expected.rawLong).build(); + ConfigObject configObject = ConfigObject.builder() + .rawInt(expected.rawInt) + .rawLong(expected.rawLong) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); @@ -52,10 +54,14 @@ public class ConfigurationSettableUtilsTest { @Test public void testHeapValuesSet() { - ConfigResult expected = ConfigResult.builder().name("test").boxedInt(10).boxedLong(15L).build(); + ConfigResult expected = + ConfigResult.builder().name("test").boxedInt(10).boxedLong(15L).build(); - ConfigObject configObject = ConfigObject.builder().name(expected.name).boxedInt(expected.boxedInt.intValue()) - .boxedLong(expected.boxedLong.longValue()).build(); + ConfigObject configObject = ConfigObject.builder() + .name(expected.name) + .boxedInt(expected.boxedInt.intValue()) + .boxedLong(expected.boxedLong.longValue()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); @@ -63,27 +69,39 @@ public class ConfigurationSettableUtilsTest { @Test public void testComplexValuesSet() { - ComplexValue complexValue = ComplexValue.builder().name("complex").value(10).build(); - ConfigResult expected = ConfigResult.builder().complexValue(complexValue).build(); + ComplexValue complexValue = + ComplexValue.builder().name("complex").value(10).build(); + ConfigResult expected = + ConfigResult.builder().complexValue(complexValue).build(); ConfigObject configObject = ConfigObject.builder() 
- .complexValue(ComplexValue.builder().name(complexValue.name).value(complexValue.value).build()).build(); + .complexValue(ComplexValue.builder() + .name(complexValue.name) + .value(complexValue.value) + .build()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); - } @Test public void testOptionalValuesSet() { - ComplexValue complexValue = ComplexValue.builder().name("optional-complex").value(20).build(); - ConfigResult expected = ConfigResult.builder().optionalString(Optional.of("test")) - .optionalInteger(Optional.of(10)).optionalLong(Optional.of(15L)) - .optionalComplexValue(Optional.of(complexValue)).build(); + ComplexValue complexValue = + ComplexValue.builder().name("optional-complex").value(20).build(); + ConfigResult expected = ConfigResult.builder() + .optionalString(Optional.of("test")) + .optionalInteger(Optional.of(10)) + .optionalLong(Optional.of(15L)) + .optionalComplexValue(Optional.of(complexValue)) + .build(); - ConfigObject configObject = ConfigObject.builder().optionalString(expected.optionalString.get()) - .optionalInteger(expected.optionalInteger.get()).optionalLong(expected.optionalLong.get()) - .optionalComplexValue(expected.optionalComplexValue.get()).build(); + ConfigObject configObject = ConfigObject.builder() + .optionalString(expected.optionalString.get()) + .optionalInteger(expected.optionalInteger.get()) + .optionalLong(expected.optionalLong.get()) + .optionalComplexValue(expected.optionalComplexValue.get()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); @@ -91,20 +109,29 @@ public class ConfigurationSettableUtilsTest { @Test public void testRenamedRawValues() { - ComplexValue complexValue = ComplexValue.builder().name("renamed-complex").value(20).build(); - ConfigResult expected = ConfigResult.builder().renamedString("renamed").renamedInt(10) - .renamedOptionalString(Optional.of("renamed-optional")).renamedComplexValue(complexValue).build(); + ComplexValue complexValue = + ComplexValue.builder().name("renamed-complex").value(20).build(); + ConfigResult expected = ConfigResult.builder() + .renamedString("renamed") + .renamedInt(10) + .renamedOptionalString(Optional.of("renamed-optional")) + .renamedComplexValue(complexValue) + .build(); - ConfigObject configObject = ConfigObject.builder().toRenameString(expected.renamedString) - .toRenameInt(expected.renamedInt).toRenameComplexValue(complexValue) - .optionalToRename(expected.renamedOptionalString.get()).build(); + ConfigObject configObject = ConfigObject.builder() + .toRenameString(expected.renamedString) + .toRenameInt(expected.renamedInt) + .toRenameComplexValue(complexValue) + .optionalToRename(expected.renamedOptionalString.get()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); } private ConfigResult resolve(ConfigObject configObject) { - return ConfigurationSettableUtils.resolveFields(configObject, ConfigResult.builder().build()); + return ConfigurationSettableUtils.resolveFields( + configObject, ConfigResult.builder().build()); } @Accessors(fluent = true) @@ -129,7 +156,6 @@ public class ConfigurationSettableUtilsTest { private int renamedInt; private Optional renamedOptionalString; private ComplexValue renamedComplexValue; - } @Accessors(fluent = true) @@ -145,35 +171,47 @@ public class ConfigurationSettableUtilsTest { @ConfigurationSettable(configurationClass = ConfigResult.class) private String name; + @ConfigurationSettable(configurationClass = 
ConfigResult.class) private int rawInt; + @ConfigurationSettable(configurationClass = ConfigResult.class) private Integer boxedInt; + @ConfigurationSettable(configurationClass = ConfigResult.class) private long rawLong; + @ConfigurationSettable(configurationClass = ConfigResult.class) private Long boxedLong; + @ConfigurationSettable(configurationClass = ConfigResult.class) private ComplexValue complexValue; @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private String optionalString; + @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private Integer optionalInteger; + @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private Long optionalLong; + @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private ComplexValue optionalComplexValue; @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedString") private String toRenameString; + @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedInt") private int toRenameInt; - @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedOptionalString", convertToOptional = true) + + @ConfigurationSettable( + configurationClass = ConfigResult.class, + methodName = "renamedOptionalString", + convertToOptional = true) private String optionalToRename; + @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedComplexValue") private ComplexValue toRenameComplexValue; - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java index a3aace72..d274f9f2 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.multilang.config; -import static org.junit.Assert.assertEquals; - import java.util.Date; import org.junit.Test; +import static org.junit.Assert.assertEquals; + public class DatePropertyValueDecoderTest { private DatePropertyValueDecoder decoder = new DatePropertyValueDecoder(); diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java index 11b12588..255a07ff 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java @@ -21,7 +21,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.retrieval.fanout.FanOutConfig; @@ -50,18 +49,22 @@ public class FanoutConfigBeanTest { MultiLangDaemonConfiguration configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); configuration.setStreamName("test-stream"); 
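The ConfigurationSettableUtilsTest hunk above pins the annotation contract: each field on the source object that carries @ConfigurationSettable is copied onto the target, with convertToOptional wrapping the value in an Optional and methodName routing it to a differently named property. A minimal sketch of that contract under assumed names; Source and Result are hypothetical mirrors of the test's ConfigObject and ConfigResult:

    import java.util.Optional;

    import lombok.Builder;
    import lombok.Getter;
    import lombok.Setter;
    import lombok.experimental.Accessors;
    import software.amazon.kinesis.multilang.config.ConfigurationSettable;
    import software.amazon.kinesis.multilang.config.ConfigurationSettableUtils;

    public class ConfigurationSettableSketch {

        @Accessors(fluent = true)
        @Builder
        @Getter
        @Setter
        public static class Result { // hypothetical target type
            private String name;
            private Optional<String> renamedOptionalString;
        }

        public static class Source { // hypothetical source type
            @ConfigurationSettable(configurationClass = Result.class)
            private String name = "test";

            @ConfigurationSettable(
                    configurationClass = Result.class,
                    methodName = "renamedOptionalString",
                    convertToOptional = true)
            private String optionalToRename = "renamed-optional";
        }

        public static void main(String[] args) {
            Result result = ConfigurationSettableUtils.resolveFields(
                    new Source(), Result.builder().build());
            // Expected, per the tests above:
            //   result.name()                  -> "test"
            //   result.renamedOptionalString() -> Optional.of("renamed-optional")
        }
    }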
configuration.setApplicationName("test-application"); - FanOutConfig fanOutConfig =fanoutConfigBean.build(kinesisAsyncClient, configuration); + FanOutConfig fanOutConfig = fanoutConfigBean.build(kinesisAsyncClient, configuration); assertThat(fanOutConfig.kinesisClient(), equalTo(kinesisAsyncClient)); assertThat(fanOutConfig.streamName(), equalTo(configuration.getStreamName())); assertThat(fanOutConfig.applicationName(), equalTo(configuration.getApplicationName())); assertThat(fanOutConfig.consumerArn(), equalTo(fanoutConfigBean.getConsumerArn())); assertThat(fanOutConfig.consumerName(), equalTo(fanoutConfigBean.getConsumerName())); - assertThat(fanOutConfig.maxDescribeStreamConsumerRetries(), equalTo(fanoutConfigBean.getMaxDescribeStreamConsumerRetries())); - assertThat(fanOutConfig.maxDescribeStreamSummaryRetries(), equalTo(fanoutConfigBean.getMaxDescribeStreamSummaryRetries())); - assertThat(fanOutConfig.registerStreamConsumerRetries(), equalTo(fanoutConfigBean.getRegisterStreamConsumerRetries())); + assertThat( + fanOutConfig.maxDescribeStreamConsumerRetries(), + equalTo(fanoutConfigBean.getMaxDescribeStreamConsumerRetries())); + assertThat( + fanOutConfig.maxDescribeStreamSummaryRetries(), + equalTo(fanoutConfigBean.getMaxDescribeStreamSummaryRetries())); + assertThat( + fanOutConfig.registerStreamConsumerRetries(), + equalTo(fanoutConfigBean.getRegisterStreamConsumerRetries())); assertThat(fanOutConfig.retryBackoffMillis(), equalTo(fanoutConfigBean.getRetryBackoffMillis())); - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java index 031fc427..b0e3b870 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java @@ -14,15 +14,6 @@ */ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.ByteArrayInputStream; import java.io.InputStream; import java.net.URI; @@ -34,42 +25,45 @@ import java.util.Set; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; +import com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.exception.ExceptionUtils; -import org.junit.Rule; import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - -import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; -import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; + +import static org.hamcrest.CoreMatchers.equalTo; +import static 
org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @RunWith(MockitoJUnitRunner.class) public class KinesisClientLibConfiguratorTest { - private String credentialName1 = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProvider"; - private String credentialName2 = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysFailCredentialsProvider"; - private String credentialNameKinesis = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderKinesis"; - private String credentialNameDynamoDB = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderDynamoDB"; - private String credentialNameCloudWatch = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderCloudWatch"; - private KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator(); - - @Rule - public final ExpectedException thrown = ExpectedException.none(); - - @Mock - private ShardRecordProcessorFactory shardRecordProcessorFactory; + private final String credentialName1 = AlwaysSucceedCredentialsProvider.class.getName(); + private final String credentialName2 = AlwaysFailCredentialsProvider.class.getName(); + private final String credentialNameKinesis = AlwaysSucceedCredentialsProviderKinesis.class.getName(); + private final String credentialNameDynamoDB = AlwaysSucceedCredentialsProviderDynamoDB.class.getName(); + private final String credentialNameCloudWatch = AlwaysSucceedCredentialsProviderCloudWatch.class.getName(); + private final KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator(); @Test public void testWithBasicSetup() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = " + credentialName1, "workerId = 123" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123" + }, + '\n')); assertEquals(config.getApplicationName(), "b"); assertEquals(config.getStreamName(), "a"); assertEquals(config.getWorkerIdentifier(), "123"); @@ -79,9 +73,16 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithLongVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "workerId = 123", "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "workerId = 123", + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n')); assertEquals(config.getApplicationName(), "app"); assertEquals(config.getStreamName(), "123"); @@ -93,9 +94,14 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithInitialPositionInStreamExtended() { 
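Every test in this configurator hunk follows the same recipe: newline-joined key = value properties are fed to KinesisClientLibConfigurator.getConfiguration(InputStream), which yields a MultiLangDaemonConfiguration (the private getConfiguration(String) helper used throughout presumably wraps exactly this call). The test opening below additionally pins the convention that initialPositionInStreamExtended is seconds since the epoch, surfaced as a millisecond-precision java.util.Date. A minimal sketch using only calls visible in this diff; the credentials provider class name is a placeholder, where the real tests use inner classes defined in this file:

    import java.io.ByteArrayInputStream;
    import java.util.Date;

    import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;
    import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

    public class ConfiguratorSketch {

        public static void main(String[] args) {
            long epochTimeInSeconds = 1617406032L; // roughly 2021-04-02T23:27:12Z
            String props = String.join("\n",
                    "applicationName = app",
                    "streamName = 123",
                    "AWSCredentialsProvider = com.example.MyCredentialsProvider", // placeholder
                    "initialPositionInStreamExtended = " + epochTimeInSeconds);

            KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();
            MultiLangDaemonConfiguration config =
                    configurator.getConfiguration(new ByteArrayInputStream(props.getBytes()));

            // Epoch seconds are widened to a millisecond Date (seconds * 1000L).
            Date timestamp = config.getInitialPositionInStreamExtended().getTimestamp();
            assert timestamp.equals(new Date(epochTimeInSeconds * 1000L)); // run with -ea
        }
    }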
long epochTimeInSeconds = 1617406032; - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "initialPositionInStreamExtended = " + epochTimeInSeconds}, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "initialPositionInStreamExtended = " + epochTimeInSeconds + }, + '\n')); assertEquals(config.getInitialPositionInStreamExtended().getTimestamp(), new Date(epochTimeInSeconds * 1000L)); assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.AT_TIMESTAMP); @@ -106,9 +112,14 @@ // AT_TIMESTAMP cannot be used as initialPositionInStream. If a user wants to specify AT_TIMESTAMP, // they must specify the time with initialPositionInStreamExtended. try { - getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "initialPositionInStream = AT_TIMESTAMP"}, '\n')); + getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "initialPositionInStream = AT_TIMESTAMP" + }, + '\n')); fail("Should have thrown when initialPositionInStream is set to AT_TIMESTAMP"); } catch (Exception e) { Throwable rootCause = ExceptionUtils.getRootCause(e); @@ -121,9 +132,14 @@ // initialPositionInStreamExtended takes a long value indicating seconds since epoch. If a non-long // value is provided, the constructor should throw an IllegalArgumentException.
try { - getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "initialPositionInStreamExtended = null"}, '\n')); + getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "initialPositionInStreamExtended = null" + }, + '\n')); fail("Should have thrown when initialPositionInStreamExtended is set to null"); } catch (Exception e) { Throwable rootCause = ExceptionUtils.getRootCause(e); @@ -134,8 +150,13 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithUnsupportedClientConfigurationVariables() { MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( - new String[] { "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, "workerId = id", - "kinesisClientConfig = {}", "streamName = stream", "applicationName = b" }, + new String[] { + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "workerId = id", + "kinesisClientConfig = {}", + "streamName = stream", + "applicationName = b" + }, '\n')); assertEquals(config.getApplicationName(), "b"); @@ -146,10 +167,18 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithIntVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = kinesis", - "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1, "workerId = w123", - "maxRecords = 10", "metricsMaxQueueSize = 20", "applicationName = kinesis", - "retryGetRecordsInSeconds = 2", "maxGetRecordsThreadPool = 1" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = kinesis", + "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1, + "workerId = w123", + "maxRecords = 10", + "metricsMaxQueueSize = 20", + "applicationName = kinesis", + "retryGetRecordsInSeconds = 2", + "maxGetRecordsThreadPool = 1" + }, + '\n')); assertEquals(config.getApplicationName(), "kinesis"); assertEquals(config.getStreamName(), "kinesis"); @@ -162,9 +191,15 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithBooleanVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD, " + credentialName1, "workerId = 0", - "cleanupLeasesUponShardCompletion = false", "validateSequenceNumberBeforeCheckpointing = true" }, + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD, " + credentialName1, + "workerId = 0", + "cleanupLeasesUponShardCompletion = false", + "validateSequenceNumberBeforeCheckpointing = true" + }, '\n')); assertEquals(config.getApplicationName(), "b"); @@ -176,9 +211,16 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithStringVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 1", - "kinesisEndpoint = https://kinesis", "metricsLevel = SUMMARY" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + 
"AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 1", + "kinesisEndpoint = https://kinesis", + "metricsLevel = SUMMARY" + }, + '\n')); assertEquals(config.getWorkerIdentifier(), "1"); assertEquals(config.getKinesisClient().get("endpointOverride"), URI.create("https://kinesis")); @@ -187,38 +229,66 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithSetVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 1", - "metricsEnabledDimensions = ShardId, WorkerIdentifier" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 1", + "metricsEnabledDimensions = ShardId, WorkerIdentifier" + }, + '\n')); - Set expectedMetricsEnabledDimensions = ImmutableSet. builder() - .add("ShardId", "WorkerIdentifier").build(); - assertThat(new HashSet<>(Arrays.asList(config.getMetricsEnabledDimensions())), equalTo(expectedMetricsEnabledDimensions)); + Set expectedMetricsEnabledDimensions = ImmutableSet.builder() + .add("ShardId", "WorkerIdentifier") + .build(); + assertThat( + new HashSet<>(Arrays.asList(config.getMetricsEnabledDimensions())), + equalTo(expectedMetricsEnabledDimensions)); } @Test public void testWithInitialPositionInStreamTrimHorizon() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon" + }, + '\n')); assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); } @Test public void testWithInitialPositionInStreamLatest() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = LateSt" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = LateSt" + }, + '\n')); assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.LATEST); } @Test public void testSkippingNonKCLVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "abc = 1" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "abc = 1" + }, + '\n')); assertEquals(config.getApplicationName(), "b"); assertEquals(config.getStreamName(), "a"); @@ -228,118 +298,159 @@ public class KinesisClientLibConfiguratorTest { @Test public 
void testEmptyOptionalVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 1" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "maxGetRecordsThreadPool = 1" + }, + '\n')); assertThat(config.getMaxGetRecordsThreadPool(), equalTo(1)); assertThat(config.getRetryGetRecordsInSeconds(), nullValue()); } @Test public void testWithZeroValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 0", - "retryGetRecordsInSeconds = 0" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - try { - configurator.getConfiguration(input); - } catch (Exception e) { - fail("Don't expect to fail on invalid variable value"); - - } + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "maxGetRecordsThreadPool = 0", + "retryGetRecordsInSeconds = 0" + }, + '\n'); + getConfiguration(test); } @Test public void testWithInvalidIntValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100nf" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - try { - configurator.getConfiguration(input); - } catch (Exception e) { - fail("Don't expect to fail on invalid variable value"); - } + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100nf" + }, + '\n'); + getConfiguration(test); } @Test public void testWithNegativeIntValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = -12" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = -12" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - try { - configurator.getConfiguration(input); - } catch (Exception e) { - fail("Don't expect to fail on invalid variable value"); - } + getConfiguration(test); } - @Test + @Test(expected = IllegalArgumentException.class) public void testWithMissingCredentialsProvider() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("A basic set of AWS credentials must be provided"); - - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", "workerId = 123", - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); + 
String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "workerId = 123", + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - configurator.getConfiguration(input); + getConfiguration(test); } @Test public void testWithMissingWorkerId() { String test = StringUtils.join( - new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName1, - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - MultiLangDaemonConfiguration config = configurator.getConfiguration(input); + MultiLangDaemonConfiguration config = getConfiguration(test); // if workerId is not provided, configurator should assign one for it automatically assertNotNull(config.getWorkerIdentifier()); assertFalse(config.getWorkerIdentifier().isEmpty()); } - @Test - public void testWithMissingStreamName() { - thrown.expect(NullPointerException.class); - thrown.expectMessage("Stream name is required"); - - String test = StringUtils.join(new String[] { "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - configurator.getConfiguration(input); + @Test(expected = NullPointerException.class) + public void testWithMissingStreamNameAndMissingStreamArn() { + String test = StringUtils.join( + new String[] { + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100" + }, + '\n'); + getConfiguration(test); } - @Test - public void testWithMissingApplicationName() { - thrown.expect(NullPointerException.class); - thrown.expectMessage("Application name is required"); + @Test(expected = IllegalArgumentException.class) + public void testWithEmptyStreamNameAndMissingStreamArn() { + String test = StringUtils.join( + new String[] { + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100", + "streamName = ", + "streamArn = " + }, + '\n'); + getConfiguration(test); + } - String test = StringUtils.join(new String[] { "streamName = a", "AWSCredentialsProvider = " + credentialName1, - "workerId = 123", "failoverTimeMillis = 100" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - configurator.getConfiguration(input); + @Test(expected = NullPointerException.class) + public void testWithMissingApplicationName() { + String test = StringUtils.join( + new String[] { + "streamName = a", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100" + }, + '\n'); + getConfiguration(test); } @Test public void testWithAWSCredentialsFailed() { String test = StringUtils.join( - new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName2, - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName2, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); - InputStream input = new 
ByteArrayInputStream(test.getBytes()); + MultiLangDaemonConfiguration config = getConfiguration(test); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement try { - MultiLangDaemonConfiguration config = configurator.getConfiguration(input); - config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getKinesisCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); fail("expect failure with wrong credentials provider"); } catch (Exception e) { // succeed @@ -349,59 +460,63 @@ public class KinesisClientLibConfiguratorTest { // TODO: fix this test @Test public void testWithDifferentAWSCredentialsForDynamoDBAndCloudWatch() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialNameKinesis, - "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB, - "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, "failoverTimeMillis = 100", - "shardSyncIntervalMillis = 500" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialNameKinesis, + "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB, + "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - MultiLangDaemonConfiguration config = configurator.getConfiguration(input); - try { - config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); - } catch (Exception e) { - fail("Kinesis credential providers should not fail."); - } - try { - config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); - } catch (Exception e) { - fail("DynamoDB credential providers should not fail."); - } - try { - config.getCloudWatchCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); - } catch (Exception e) { - fail("CloudWatch credential providers should not fail."); - } + final MultiLangDaemonConfiguration config = getConfiguration(test); + config.getKinesisCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); + config.getDynamoDBCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); + config.getCloudWatchCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); } // TODO: fix this test @Test public void testWithDifferentAWSCredentialsForDynamoDBAndCloudWatchFailed() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialNameKinesis, - "AWSCredentialsProviderDynamoDB = " + credentialName2, - "AWSCredentialsProviderCloudWatch = " + credentialName2, "failoverTimeMillis = 100", - "shardSyncIntervalMillis = 500" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialNameKinesis, + "AWSCredentialsProviderDynamoDB = " + credentialName2, + "AWSCredentialsProviderCloudWatch = " + credentialName2, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + 
'\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - MultiLangDaemonConfiguration config = configurator.getConfiguration(input); + final MultiLangDaemonConfiguration config = getConfiguration(test); + config.getKinesisCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); try { - config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); - } catch (Exception e) { - fail("Kinesis credential providers should not fail."); - } - try { - config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getDynamoDBCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); fail("DynamoDB credential providers should fail."); } catch (Exception e) { // succeed } try { - config.getCloudWatchCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getCloudWatchCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); fail("CloudWatch credential providers should fail."); } catch (Exception e) { // succeed @@ -419,9 +534,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -435,9 +548,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -451,9 +562,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -467,9 +576,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -483,14 +590,11 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } private MultiLangDaemonConfiguration getConfiguration(String configString) { InputStream input = new ByteArrayInputStream(configString.getBytes()); - MultiLangDaemonConfiguration config = configurator.getConfiguration(input); - return config; + return configurator.getConfiguration(input); } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java index 07f8082b..1c45eb6e 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java @@ -15,10 +15,6 @@ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertThat; - import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; import org.junit.After; @@ -29,12 +25,18 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import 
software.amazon.kinesis.retrieval.fanout.FanOutConfig; import software.amazon.kinesis.retrieval.polling.PollingConfig; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + @RunWith(MockitoJUnitRunner.class) public class MultiLangDaemonConfigurationTest { @@ -67,7 +69,6 @@ public class MultiLangDaemonConfigurationTest { } } - public MultiLangDaemonConfiguration baseConfiguration() { MultiLangDaemonConfiguration configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); configuration.setApplicationName("Test"); @@ -82,33 +83,98 @@ public class MultiLangDaemonConfigurationTest { MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setMaxLeasesForWorker(10); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); assertThat(resolvedConfiguration.leaseManagementConfig.maxLeasesForWorker(), equalTo(10)); } + @Test + public void testSetEnablePriorityLeaseAssignment() { + MultiLangDaemonConfiguration configuration = baseConfiguration(); + configuration.setEnablePriorityLeaseAssignment(false); + + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); + + assertThat(resolvedConfiguration.leaseManagementConfig.enablePriorityLeaseAssignment(), equalTo(false)); + } + + @Test + public void testSetLeaseTableDeletionProtectionEnabledToTrue() { + MultiLangDaemonConfiguration configuration = baseConfiguration(); + configuration.setLeaseTableDeletionProtectionEnabled(true); + + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); + + assertTrue(resolvedConfiguration.leaseManagementConfig.leaseTableDeletionProtectionEnabled()); + } + + @Test + public void testSetLeaseTablePitrEnabledToTrue() { + MultiLangDaemonConfiguration configuration = baseConfiguration(); + configuration.setLeaseTablePitrEnabled(true); + + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); + + assertTrue(resolvedConfiguration.leaseManagementConfig.leaseTablePitrEnabled()); + } + + @Test + public void testSetLeaseTableDeletionProtectionEnabledToFalse() { + MultiLangDaemonConfiguration configuration = baseConfiguration(); + configuration.setLeaseTableDeletionProtectionEnabled(false); + + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); + + assertFalse(resolvedConfiguration.leaseManagementConfig.leaseTableDeletionProtectionEnabled()); + } + + @Test + public void testSetLeaseTablePitrEnabledToFalse() { + MultiLangDaemonConfiguration configuration = baseConfiguration(); + configuration.setLeaseTablePitrEnabled(false); + + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); + + assertFalse(resolvedConfiguration.leaseManagementConfig.leaseTablePitrEnabled()); + } + @Test public void 
testDefaultRetrievalConfig() { MultiLangDaemonConfiguration configuration = baseConfiguration(); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); } @Test public void testDefaultRetrievalConfigWithPollingConfigSet() { MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setMaxRecords(10); + configuration.setIdleTimeBetweenReadsInMillis(60000); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(PollingConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class)); + assertEquals( + 10, + ((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()).maxRecords()); + assertEquals( + 60000, + ((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()) + .idleTimeBetweenReadsInMillis()); + assertTrue(((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()) + .usePollingConfigIdleTimeValue()); } @Test @@ -116,11 +182,11 @@ public class MultiLangDaemonConfigurationTest { MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setRetrievalMode(RetrievalMode.FANOUT); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); } @Test @@ -128,37 +194,39 @@ public class MultiLangDaemonConfigurationTest { MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setRetrievalMode(RetrievalMode.POLLING); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(PollingConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class)); } @Test public void testRetrievalModeSetForPollingString() throws Exception { MultiLangDaemonConfiguration configuration = baseConfiguration(); - utilsBean.setProperty(configuration, "retrievalMode", RetrievalMode.POLLING.name().toLowerCase()); + utilsBean.setProperty( + configuration, "retrievalMode", 
RetrievalMode.POLLING.name().toLowerCase()); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(PollingConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class)); } @Test public void testRetrievalModeSetForFanoutString() throws Exception { MultiLangDaemonConfiguration configuration = baseConfiguration(); - utilsBean.setProperty(configuration, "retrievalMode", RetrievalMode.FANOUT.name().toLowerCase()); + utilsBean.setProperty( + configuration, "retrievalMode", RetrievalMode.FANOUT.name().toLowerCase()); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); } @Test @@ -175,7 +243,7 @@ public class MultiLangDaemonConfigurationTest { // TODO : Enable this test once https://github.com/awslabs/amazon-kinesis-client/issues/692 is resolved public void testmetricsEnabledDimensions() { MultiLangDaemonConfiguration configuration = baseConfiguration(); - configuration.setMetricsEnabledDimensions(new String[]{"Operation"}); + configuration.setMetricsEnabledDimensions(new String[] {"Operation"}); configuration.resolvedConfiguration(shardRecordProcessorFactory); } @@ -188,14 +256,14 @@ public class MultiLangDaemonConfigurationTest { configuration.setRetrievalMode(RetrievalMode.FANOUT); configuration.getFanoutConfig().setConsumerArn(consumerArn); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); - FanOutConfig fanOutConfig = (FanOutConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); + FanOutConfig fanOutConfig = + (FanOutConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(); assertThat(fanOutConfig.consumerArn(), equalTo(consumerArn)); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java index 2d032728..576e6101 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java @@ -15,6 +15,8 @@ package 
software.amazon.kinesis.multilang.config; +import java.util.Optional; + import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; import org.junit.Test; @@ -24,8 +26,6 @@ import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.retrieval.polling.PollingConfig; -import java.util.Optional; - import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertThat; @@ -46,17 +46,23 @@ public class PollingConfigBeanTest { ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean(); BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean); - MultiLangDaemonConfiguration multiLangDaemonConfiguration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); + MultiLangDaemonConfiguration multiLangDaemonConfiguration = + new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); multiLangDaemonConfiguration.setStreamName("test-stream"); PollingConfig pollingConfig = pollingConfigBean.build(kinesisAsyncClient, multiLangDaemonConfiguration); assertThat(pollingConfig.kinesisClient(), equalTo(kinesisAsyncClient)); assertThat(pollingConfig.streamName(), equalTo(multiLangDaemonConfiguration.getStreamName())); - assertThat(pollingConfig.idleTimeBetweenReadsInMillis(), equalTo(pollingConfigBean.getIdleTimeBetweenReadsInMillis())); - assertThat(pollingConfig.maxGetRecordsThreadPool(), equalTo(Optional.of(pollingConfigBean.getMaxGetRecordsThreadPool()))); + assertThat( + pollingConfig.idleTimeBetweenReadsInMillis(), + equalTo(pollingConfigBean.getIdleTimeBetweenReadsInMillis())); + assertThat( + pollingConfig.maxGetRecordsThreadPool(), + equalTo(Optional.of(pollingConfigBean.getMaxGetRecordsThreadPool()))); assertThat(pollingConfig.maxRecords(), equalTo(pollingConfigBean.getMaxRecords())); - assertThat(pollingConfig.retryGetRecordsInSeconds(), equalTo(Optional.of(pollingConfigBean.getRetryGetRecordsInSeconds()))); + assertThat( + pollingConfig.retryGetRecordsInSeconds(), + equalTo(Optional.of(pollingConfigBean.getRetryGetRecordsInSeconds()))); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java index 4aabfb33..b817da05 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java @@ -15,11 +15,6 @@ package software.amazon.kinesis.multilang.messages; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.sameInstance; -import static org.junit.Assert.assertThat; - import java.nio.ByteBuffer; import java.time.Instant; import java.util.Arrays; @@ -31,9 +26,13 @@ import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import org.junit.Test; - import software.amazon.kinesis.retrieval.KinesisClientRecord; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.CoreMatchers.sameInstance; +import static org.junit.Assert.assertThat; + public class JsonFriendlyRecordTest { private KinesisClientRecord kinesisClientRecord; @@ 
-48,7 +47,7 @@ public class JsonFriendlyRecordTest { @Test public void testRecordHandlesNoByteArrayBuffer() { - byte[] expected = new byte[] { 1, 2, 3, 4 }; + byte[] expected = new byte[] {1, 2, 3, 4}; ByteBuffer expectedBuffer = ByteBuffer.allocateDirect(expected.length); @@ -64,7 +63,7 @@ public class JsonFriendlyRecordTest { @Test public void testRecordHandlesArrayByteBuffer() { - ByteBuffer expected = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 }); + ByteBuffer expected = ByteBuffer.wrap(new byte[] {1, 2, 3, 4}); kinesisClientRecord = defaultRecord().data(expected).build(); JsonFriendlyRecord jsonFriendlyRecord = JsonFriendlyRecord.fromKinesisClientRecord(kinesisClientRecord); @@ -82,14 +81,15 @@ public class JsonFriendlyRecordTest { private RecordMatcher(KinesisClientRecord expected) { this.matchers = Arrays.asList( - new FieldMatcher<>("approximateArrivalTimestamp", + new FieldMatcher<>( + "approximateArrivalTimestamp", equalTo(expected.approximateArrivalTimestamp().toEpochMilli()), JsonFriendlyRecord::getApproximateArrivalTimestamp), new FieldMatcher<>("partitionKey", expected::partitionKey, JsonFriendlyRecord::getPartitionKey), - new FieldMatcher<>("sequenceNumber", expected::sequenceNumber, - JsonFriendlyRecord::getSequenceNumber), - new FieldMatcher<>("subSequenceNumber", expected::subSequenceNumber, - JsonFriendlyRecord::getSubSequenceNumber), + new FieldMatcher<>( + "sequenceNumber", expected::sequenceNumber, JsonFriendlyRecord::getSequenceNumber), + new FieldMatcher<>( + "subSequenceNumber", expected::subSequenceNumber, JsonFriendlyRecord::getSubSequenceNumber), new FieldMatcher<>("data", dataEquivalentTo(expected.data()), JsonFriendlyRecord::getData)); this.expected = expected; @@ -97,13 +97,16 @@ public class JsonFriendlyRecordTest { @Override protected boolean matchesSafely(JsonFriendlyRecord item, Description mismatchDescription) { - return matchers.stream().map(m -> { - if (!m.matches(item)) { - m.describeMismatch(item, mismatchDescription); - return false; - } - return true; - }).reduce((l, r) -> l && r).orElse(true); + return matchers.stream() + .map(m -> { + if (!m.matches(item)) { + m.describeMismatch(item, mismatchDescription); + return false; + } + return true; + }) + .reduce((l, r) -> l && r) + .orElse(true); } @Override @@ -160,8 +163,9 @@ public class JsonFriendlyRecordTest { } private KinesisClientRecord.KinesisClientRecordBuilder defaultRecord() { - return KinesisClientRecord.builder().partitionKey("test-partition").sequenceNumber("123") + return KinesisClientRecord.builder() + .partitionKey("test-partition") + .sequenceNumber("123") .approximateArrivalTimestamp(Instant.now()); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java index 47337221..adbd17fa 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java @@ -17,73 +17,65 @@ package software.amazon.kinesis.multilang.messages; import java.nio.ByteBuffer; import java.util.Collections; -import org.junit.Assert; -import org.junit.Test; - import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; - +import org.junit.Assert; +import org.junit.Test; +import 
software.amazon.kinesis.lifecycle.ShutdownReason; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.multilang.messages.CheckpointMessage; -import software.amazon.kinesis.multilang.messages.InitializeMessage; -import software.amazon.kinesis.multilang.messages.Message; -import software.amazon.kinesis.multilang.messages.ProcessRecordsMessage; -import software.amazon.kinesis.multilang.messages.ShutdownMessage; -import software.amazon.kinesis.multilang.messages.ShutdownRequestedMessage; -import software.amazon.kinesis.multilang.messages.StatusMessage; import software.amazon.kinesis.retrieval.KinesisClientRecord; public class MessageTest { @Test public void toStringTest() { - Message[] messages = new Message[]{ - new CheckpointMessage("1234567890", 0L, null), - new InitializeMessage(InitializationInput.builder().shardId("shard-123").build()), - new ProcessRecordsMessage(ProcessRecordsInput.builder() - .records(Collections.singletonList( - KinesisClientRecord.builder() - .data(ByteBuffer.wrap("cat".getBytes())) - .partitionKey("cat") - .sequenceNumber("555") - .build())) - .build()), - new ShutdownMessage(ShutdownReason.LEASE_LOST), - new StatusMessage("processRecords"), - new InitializeMessage(), - new ProcessRecordsMessage(), - new ShutdownRequestedMessage(), - new LeaseLostMessage(), - new ShardEndedMessage() + Message[] messages = new Message[] { + new CheckpointMessage("1234567890", 0L, null), + new InitializeMessage( + InitializationInput.builder().shardId("shard-123").build()), + new ProcessRecordsMessage(ProcessRecordsInput.builder() + .records(Collections.singletonList(KinesisClientRecord.builder() + .data(ByteBuffer.wrap("cat".getBytes())) + .partitionKey("cat") + .sequenceNumber("555") + .build())) + .build()), + new ShutdownMessage(ShutdownReason.LEASE_LOST), + new StatusMessage("processRecords"), + new InitializeMessage(), + new ProcessRecordsMessage(), + new ShutdownRequestedMessage(), + new LeaseLostMessage(), + new ShardEndedMessage(), }; -// TODO: fix this + // TODO: fix this for (int i = 0; i < messages.length; i++) { System.out.println(messages[i].toString()); - Assert.assertTrue("Each message should contain the action field", messages[i].toString().contains("action")); + Assert.assertTrue( + "Each message should contain the action field", + messages[i].toString().contains("action")); } // Hit this constructor - KinesisClientRecord defaultJsonFriendlyRecord = KinesisClientRecord.builder().build(); + KinesisClientRecord defaultJsonFriendlyRecord = + KinesisClientRecord.builder().build(); Assert.assertNull(defaultJsonFriendlyRecord.partitionKey()); Assert.assertNull(defaultJsonFriendlyRecord.data()); Assert.assertNull(defaultJsonFriendlyRecord.sequenceNumber()); Assert.assertNull(new ShutdownMessage(null).getReason()); // Hit the bad object mapping path - Message withBadMapper = new Message() { - }.withObjectMapper(new ObjectMapper() { + Message withBadMapper = new Message() {}.withObjectMapper(new ObjectMapper() { /** - * + * */ private static final long serialVersionUID = 1L; @Override public String writeValueAsString(Object m) throws JsonProcessingException { - throw new JsonProcessingException(new Throwable()) { - }; + throw new JsonProcessingException(new Throwable()) {}; } }); String s = withBadMapper.toString(); diff --git a/amazon-kinesis-client-multilang/src/test/resources/multilang.properties 
b/amazon-kinesis-client-multilang/src/test/resources/multilang.properties
new file mode 100644
index 00000000..34cb0c1a
--- /dev/null
+++ b/amazon-kinesis-client-multilang/src/test/resources/multilang.properties
@@ -0,0 +1,93 @@
+# The script that abides by the multi-language protocol. This script will
+# be executed by the MultiLangDaemon, which will communicate with this script
+# over STDIN and STDOUT according to the multi-language protocol.
+executableName = sample_kclpy_app.py
+
+# The Stream ARN: arn:aws:kinesis:<region>:<account-id>:stream/<stream-name>
+# Important: streamArn takes precedence over streamName if both are set
+streamArn = arn:aws:kinesis:us-east-5:000000000000:stream/kclpysample
+
+# The name of an Amazon Kinesis stream to process.
+# Important: streamArn takes precedence over streamName if both are set
+streamName = kclpysample
+
+# Used by the KCL as the name of this application. Will be used as the name
+# of an Amazon DynamoDB table which will store the lease and checkpoint
+# information for workers with this application name
+applicationName = MultiLangTest
+
+# Users can change the credentials provider the KCL will use to retrieve credentials.
+# The DefaultAWSCredentialsProviderChain checks several providers, which is
+# described here:
+# http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html
+AWSCredentialsProvider = DefaultAWSCredentialsProviderChain
+
+# Appended to the user agent of the KCL. Does not impact the functionality of the
+# KCL in any other way.
+processingLanguage = python/3.8
+
+# Valid options are TRIM_HORIZON or LATEST.
+# See http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
+initialPositionInStream = TRIM_HORIZON
+
+# To start processing records from a specific timestamp, set a timestamp value for
+# 'initialPositionInStreamExtended' and uncomment the line below with the right timestamp value.
+# See more under 'Timestamp' at http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
+#initialPositionInStreamExtended = 1636609142
+
+# The following properties are also available for configuring the KCL Worker that is created
+# by the MultiLangDaemon.
+
+# The KCL defaults to us-east-1
+regionName = us-east-1
+
+# Failover time in milliseconds. A worker which does not renew its lease within this time interval
+# will be regarded as having problems, and its shards will be assigned to other workers.
+# For applications that have a large number of shards, this may be set to a higher number to reduce
+# the number of DynamoDB IOPS required for tracking leases
+failoverTimeMillis = 10000
+
+# A worker id that uniquely identifies this worker among all workers using the same applicationName.
+# If this isn't provided, a MultiLangDaemon instance will assign a unique workerId to itself.
+workerId = "workerId"
+
+# Shard sync interval in milliseconds, i.e. wait this long between shard sync tasks.
+shardSyncIntervalMillis = 60000
+
+# Max records to fetch from Kinesis in a single GetRecords call.
+maxRecords = 10000
+
+# Idle time between record reads in milliseconds.
+idleTimeBetweenReadsInMillis = 1000
+
+# Enables applications to flush/checkpoint (if they have some data "in progress" but don't get new data for a while)
+callProcessRecordsEvenForEmptyRecordList = false
+
+# Interval in milliseconds between polling to check for parent shard completion.
+# Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on
+# completion of parent shards).
+parentShardPollIntervalMillis = 10000
+
+# Cleanup leases upon shard completion (don't wait until they expire in Kinesis).
+# Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try
+# to delete the ones we don't need any longer.
+cleanupLeasesUponShardCompletion = true
+
+# Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures).
+taskBackoffTimeMillis = 500
+
+# Buffer metrics for at most this long before publishing to CloudWatch.
+metricsBufferTimeMillis = 10000
+
+# Buffer at most this many metrics before publishing to CloudWatch.
+metricsMaxQueueSize = 10000
+
+# KCL will validate client-provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls
+# to RecordProcessorCheckpointer#checkpoint(String) by default.
+validateSequenceNumberBeforeCheckpointing = true
+
+# The maximum number of active threads for the MultiLangDaemon to permit.
+# If a value is provided then a FixedThreadPool is used with the maximum
+# active threads set to the provided value. If a non-positive integer or no
+# value is provided, a CachedThreadPool is used.
+maxActiveThreads = -1
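The KinesisClientLibConfiguratorTest changes earlier in this section exercise exactly this kind of properties text. A minimal sketch of the same parsing flow, assuming the no-argument KinesisClientLibConfigurator constructor and the package layout used by this PR's test sources:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

public class MultiLangPropertiesSketch {
    public static void main(String[] args) {
        // A minimal subset of the sample file above. streamArn, when present,
        // takes precedence over streamName.
        String props = String.join("\n",
                "executableName = sample_kclpy_app.py",
                "streamName = kclpysample",
                "applicationName = MultiLangTest",
                "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain",
                "regionName = us-east-1");

        InputStream input = new ByteArrayInputStream(props.getBytes(StandardCharsets.UTF_8));
        MultiLangDaemonConfiguration config = new KinesisClientLibConfigurator().getConfiguration(input);

        // workerId was omitted, so the configurator assigns one automatically,
        // which is the behavior asserted by testWithMissingWorkerId above.
        System.out.println(config.getWorkerIdentifier());
    }
}

Leaving out both streamName and streamArn fails with a NullPointerException, and setting both to empty strings fails with an IllegalArgumentException, matching testWithMissingStreamNameAndMissingStreamArn and testWithEmptyStreamNameAndMissingStreamArn above.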
diff --git a/amazon-kinesis-client/pom.xml b/amazon-kinesis-client/pom.xml
index 4fe771b7..d3d75b10 100644
--- a/amazon-kinesis-client/pom.xml
+++ b/amazon-kinesis-client/pom.xml
@@ -1,4 +1,5 @@
[XML element tags in these hunks were lost during extraction; the surviving values (an import-order spec java,,\#, a check goal, and a compile phase) suggest formatter configuration, and the one fully recoverable addition is the plugin below:]
+      <plugin>
+        <groupId>com.salesforce.servicelibs</groupId>
+        <artifactId>proto-backwards-compatibility</artifactId>
+        <version>1.0.7</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>backwards-compatibility-check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java index be137383..e10583e8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java @@ -22,5 +22,4 @@ import java.lang.annotation.RetentionPolicy; * Any class/method/variable marked with this annotation is subject to breaking changes between minor releases. */ @Retention(RetentionPolicy.CLASS) -public @interface KinesisClientInternalApi { -} +public @interface KinesisClientInternalApi {} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java index f5af81e3..7b4de295 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java @@ -40,7 +40,10 @@ public class Checkpoint { * @param pendingCheckpoint the pending checkpoint sequence number - can be null. * @param pendingCheckpointState the pending checkpoint state - can be null. */ - public Checkpoint(final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, byte[] pendingCheckpointState) { + public Checkpoint( + final ExtendedSequenceNumber checkpoint, + final ExtendedSequenceNumber pendingCheckpoint, + byte[] pendingCheckpointState) { if (checkpoint == null || checkpoint.sequenceNumber().isEmpty()) { throw new IllegalArgumentException("Checkpoint cannot be null or empty"); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java index dbde3b5a..a76673ed 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java @@ -15,7 +15,6 @@ package software.amazon.kinesis.checkpoint; - import lombok.Data; import lombok.experimental.Accessors; import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointFactory; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java index 5a1e8168..2b8e547d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java @@ -60,9 +60,7 @@ public class DoesNothingPreparedCheckpointer implements PreparedCheckpointer { @Override public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + IllegalArgumentException { // This method does nothing } - } - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java index de5565bf..7b674ca4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java @@ -20,10 +20,9 @@ import java.util.Collections; import java.util.List; import java.util.Optional; -import org.apache.commons.lang3.StringUtils; - import lombok.Data; import lombok.experimental.Accessors; +import org.apache.commons.lang3.StringUtils; /** * This supports extracting the shardId from a sequence number. @@ -98,11 +97,15 @@ } } - private static final List<SequenceNumberReader> SEQUENCE_NUMBER_READERS = Collections - .singletonList(new V2SequenceNumberReader()); + private static final List<SequenceNumberReader> SEQUENCE_NUMBER_READERS = + Collections.singletonList(new V2SequenceNumberReader()); private Optional<SequenceNumberComponents> retrieveComponentsFor(String sequenceNumber) { - return SEQUENCE_NUMBER_READERS.stream().map(r -> r.read(sequenceNumber)).filter(Optional::isPresent).map(Optional::get).findFirst(); + return SEQUENCE_NUMBER_READERS.stream() + .map(r -> r.read(sequenceNumber)) + .filter(Optional::isPresent) + .map(Optional::get) + .findFirst(); } /** @@ -118,7 +121,7 @@ * * *

    - * + * * @param sequenceNumber * the sequence number to extract the version from * @return an Optional containing the version if a compatible sequence number reader can be found, an empty Optional @@ -184,5 +187,4 @@ public Optional<Boolean> validateSequenceNumberForShard(String sequenceNumber, String shardId) { return shardIdFor(sequenceNumber).map(s -> StringUtils.equalsIgnoreCase(s, shardId)); } - }
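Since validation is delegated to version-specific readers, a short usage sketch may help; the sequence number literal below is made up, and SequenceNumberValidator's no-argument constructor is assumed:

import java.util.Optional;

import software.amazon.kinesis.checkpoint.SequenceNumberValidator;

public class SequenceNumberValidatorSketch {
    public static void main(String[] args) {
        SequenceNumberValidator validator = new SequenceNumberValidator();

        // A made-up, v2-style 56-digit sequence number; real ones come from
        // the records handed to a ShardRecordProcessor.
        String sequenceNumber = "49590338271490256608559692538361571095921575989136588898";

        // Optional.empty() means no compatible sequence-number reader exists for
        // this number's version; true/false reports whether the embedded shardId
        // matches the one supplied.
        Optional<Boolean> matches = validator.validateSequenceNumberForShard(sequenceNumber, "shardId-000000000000");
        System.out.println(matches);
    }
}

An empty result is distinct from a false result: the former means the version was not understood, the latter a genuine shard mismatch.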
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java index 63e13eaa..ea6bcaa3 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java @@ -38,8 +38,8 @@ * @param pendingCheckpointSequenceNumber sequence number to checkpoint at * @param checkpointer checkpointer to use */ - public ShardPreparedCheckpointer(ExtendedSequenceNumber pendingCheckpointSequenceNumber, - RecordProcessorCheckpointer checkpointer) { + public ShardPreparedCheckpointer( + ExtendedSequenceNumber pendingCheckpointSequenceNumber, RecordProcessorCheckpointer checkpointer) { this.pendingCheckpointSequenceNumber = pendingCheckpointSequenceNumber; this.checkpointer = checkpointer; } @@ -58,8 +58,8 @@ public class ShardPreparedCheckpointer implements PreparedCheckpointer { @Override public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - checkpointer.checkpoint(pendingCheckpointSequenceNumber.sequenceNumber(), - pendingCheckpointSequenceNumber.subSequenceNumber()); + IllegalArgumentException { + checkpointer.checkpoint( + pendingCheckpointSequenceNumber.sequenceNumber(), pendingCheckpointSequenceNumber.subSequenceNumber()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java index fd375264..4de90d94 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java @@ -41,16 +41,22 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpointer { @NonNull private final ShardInfo shardInfo; + @NonNull - @Getter @Accessors(fluent = true) + @Getter + @Accessors(fluent = true) private final Checkpointer checkpointer; // Set to the last value set via checkpoint(). // Sample use: verify application shutdown() invoked checkpoint() at the end of a shard. - @Getter @Accessors(fluent = true) + @Getter + @Accessors(fluent = true) private ExtendedSequenceNumber lastCheckpointValue; - @Getter @Accessors(fluent = true) + + @Getter + @Accessors(fluent = true) private ExtendedSequenceNumber largestPermittedCheckpointValue; + private ExtendedSequenceNumber sequenceNumberAtShardEnd; /** @@ -60,8 +66,11 @@ public synchronized void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { if (log.isDebugEnabled()) { - log.debug("Checkpointing {}, token {} at largest permitted value {}", ShardInfo.getLeaseKey(shardInfo), - shardInfo.concurrencyToken(), this.largestPermittedCheckpointValue); + log.debug( + "Checkpointing {}, token {} at largest permitted value {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + this.largestPermittedCheckpointValue); } advancePosition(this.largestPermittedCheckpointValue); } @@ -71,15 +80,15 @@ */ @Override public synchronized void checkpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { // TODO: UserRecord Deprecation if (record == null) { throw new IllegalArgumentException("Could not checkpoint a null record"); } /* else if (record instanceof UserRecord) { - checkpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); - } */ else { + checkpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); + } */ else { checkpoint(record.sequenceNumber(), 0); } } @@ -89,8 +98,8 @@ */ @Override public synchronized void checkpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { checkpoint(sequenceNumber, 0); } @@ -99,12 +108,12 @@ */ @Override public synchronized void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { if (subSequenceNumber < 0) { - throw new IllegalArgumentException("Could not checkpoint at invalid, negative subsequence number " - + subSequenceNumber); + throw new IllegalArgumentException( + "Could not checkpoint at invalid, negative subsequence number " + subSequenceNumber); } /* @@ -116,15 +125,18 @@ && newCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { if (log.isDebugEnabled()) { - log.debug("Checkpointing {}, token {} at specific extended sequence number {}", ShardInfo.getLeaseKey(shardInfo), - shardInfo.concurrencyToken(), newCheckpoint); + log.debug( + "Checkpointing {}, token {} at specific extended sequence 
number {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + newCheckpoint); } this.advancePosition(newCheckpoint); } else { throw new IllegalArgumentException(String.format( "Could not checkpoint at extended sequence number %s as it did not fall into acceptable range " - + "between the last checkpoint %s and the greatest extended sequence number passed to this " - + "record processor %s", + + "between the last checkpoint %s and the greatest extended sequence number passed to this " + + "record processor %s", newCheckpoint, this.lastCheckpointValue, this.largestPermittedCheckpointValue)); } } @@ -144,7 +156,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * {@inheritDoc} */ @Override - public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { return prepareCheckpoint(largestPermittedCheckpointValue.sequenceNumber(), applicationState); } @@ -152,15 +165,16 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * {@inheritDoc} */ @Override - public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { // // TODO: UserRecord Deprecation // if (record == null) { throw new IllegalArgumentException("Could not prepare checkpoint a null record"); } /*else if (record instanceof UserRecord) { - return prepareCheckpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); - } */ else { + return prepareCheckpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); + } */ else { return prepareCheckpoint(record.sequenceNumber(), 0, applicationState); } } @@ -188,7 +202,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi */ @Override public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { return prepareCheckpoint(sequenceNumber, 0, applicationState); } @@ -205,11 +220,13 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * {@inheritDoc} */ @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + public PreparedCheckpointer prepareCheckpoint( + String sequenceNumber, long subSequenceNumber, byte[] applicationState) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { if (subSequenceNumber < 0) { - throw new IllegalArgumentException("Could not checkpoint at invalid, negative subsequence number " - + 
subSequenceNumber); + throw new IllegalArgumentException( + "Could not checkpoint at invalid, negative subsequence number " + subSequenceNumber); } /* @@ -221,8 +238,11 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi && pendingCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { if (log.isDebugEnabled()) { - log.debug("Preparing checkpoint {}, token {} at specific extended sequence number {}", - ShardInfo.getLeaseKey(shardInfo), shardInfo.concurrencyToken(), pendingCheckpoint); + log.debug( + "Preparing checkpoint {}, token {} at specific extended sequence number {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + pendingCheckpoint); } return doPrepareCheckpoint(pendingCheckpoint, applicationState); } else { @@ -256,7 +276,6 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi this.sequenceNumberAtShardEnd = extendedSequenceNumber; } - /** * Internal API - has package level access only for testing purposes. * @@ -268,29 +287,35 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * @throws InvalidStateException */ void advancePosition(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { advancePosition(new ExtendedSequenceNumber(sequenceNumber)); } void advancePosition(ExtendedSequenceNumber extendedSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { ExtendedSequenceNumber checkpointToRecord = extendedSequenceNumber; if (sequenceNumberAtShardEnd != null && sequenceNumberAtShardEnd.equals(extendedSequenceNumber)) { // If we are about to checkpoint the very last sequence number for this shard, we might as well // just checkpoint at SHARD_END checkpointToRecord = ExtendedSequenceNumber.SHARD_END; } - + // Don't checkpoint a value we already successfully checkpointed if (extendedSequenceNumber != null && !extendedSequenceNumber.equals(lastCheckpointValue)) { try { if (log.isDebugEnabled()) { - log.debug("Setting {}, token {} checkpoint to {}", ShardInfo.getLeaseKey(shardInfo), - shardInfo.concurrencyToken(), checkpointToRecord); + log.debug( + "Setting {}, token {} checkpoint to {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + checkpointToRecord); } - checkpointer.setCheckpoint(ShardInfo.getLeaseKey(shardInfo), checkpointToRecord, shardInfo.concurrencyToken()); + checkpointer.setCheckpoint( + ShardInfo.getLeaseKey(shardInfo), checkpointToRecord, shardInfo.concurrencyToken()); lastCheckpointValue = checkpointToRecord; - } catch (ThrottlingException | ShutdownException | InvalidStateException + } catch (ThrottlingException + | ShutdownException + | InvalidStateException | KinesisClientLibDependencyException e) { throw e; } catch (KinesisClientLibException e) { @@ -323,7 +348,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * @throws ThrottlingException * @throws ShutdownException */ - private PreparedCheckpointer doPrepareCheckpoint(ExtendedSequenceNumber extendedSequenceNumber, byte[] applicationState) + private PreparedCheckpointer doPrepareCheckpoint( + ExtendedSequenceNumber extendedSequenceNumber, byte[] applicationState) throws 
KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { ExtendedSequenceNumber newPrepareCheckpoint = extendedSequenceNumber; @@ -341,8 +367,14 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi } try { - checkpointer.prepareCheckpoint(ShardInfo.getLeaseKey(shardInfo), newPrepareCheckpoint, shardInfo.concurrencyToken(), applicationState); - } catch (ThrottlingException | ShutdownException | InvalidStateException + checkpointer.prepareCheckpoint( + ShardInfo.getLeaseKey(shardInfo), + newPrepareCheckpoint, + shardInfo.concurrencyToken(), + applicationState); + } catch (ThrottlingException + | ShutdownException + | InvalidStateException | KinesisClientLibDependencyException e) { throw e; } catch (KinesisClientLibException e) { diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java index d200de84..74caae9b 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java @@ -29,9 +29,8 @@ import software.amazon.kinesis.processor.Checkpointer; @KinesisClientInternalApi public class DynamoDBCheckpointFactory implements CheckpointFactory { @Override - public Checkpointer createCheckpointer(final LeaseCoordinator leaseLeaseCoordinator, - final LeaseRefresher leaseRefresher) { + public Checkpointer createCheckpointer( + final LeaseCoordinator leaseLeaseCoordinator, final LeaseRefresher leaseRefresher) { return new DynamoDBCheckpointer(leaseLeaseCoordinator, leaseRefresher); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java index d9646351..0a6a9607 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java @@ -19,7 +19,6 @@ import java.util.Objects; import java.util.UUID; import com.google.common.annotations.VisibleForTesting; - import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -48,14 +47,16 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class DynamoDBCheckpointer implements Checkpointer { @NonNull private final LeaseCoordinator leaseCoordinator; + @NonNull private final LeaseRefresher leaseRefresher; private String operation; @Override - public void setCheckpoint(final String leaseKey, final ExtendedSequenceNumber checkpointValue, - final String concurrencyToken) throws KinesisClientLibException { + public void setCheckpoint( + final String leaseKey, final ExtendedSequenceNumber checkpointValue, final String concurrencyToken) + throws KinesisClientLibException { try { boolean wasSuccessful = setCheckpoint(leaseKey, checkpointValue, UUID.fromString(concurrencyToken)); if (!wasSuccessful) { @@ -97,16 +98,22 @@ public class DynamoDBCheckpointer implements Checkpointer { } @Override - public void prepareCheckpoint(final String leaseKey, final ExtendedSequenceNumber pendingCheckpoint, - final String concurrencyToken) throws KinesisClientLibException 
{ + public void prepareCheckpoint( + final String leaseKey, final ExtendedSequenceNumber pendingCheckpoint, final String concurrencyToken) + throws KinesisClientLibException { prepareCheckpoint(leaseKey, pendingCheckpoint, concurrencyToken, null); } @Override - public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) throws KinesisClientLibException { + public void prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + String concurrencyToken, + byte[] pendingCheckpointState) + throws KinesisClientLibException { try { - boolean wasSuccessful = - prepareCheckpoint(leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState); + boolean wasSuccessful = prepareCheckpoint( + leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState); if (!wasSuccessful) { throw new ShutdownException( "Can't prepare checkpoint - instance doesn't hold the lease for this shard"); @@ -127,8 +134,10 @@ public class DynamoDBCheckpointer implements Checkpointer { throws DependencyException, InvalidStateException, ProvisionedThroughputException { Lease lease = leaseCoordinator.getCurrentlyHeldLease(leaseKey); if (lease == null) { - log.info("Worker {} could not update checkpoint for shard {} because it does not hold the lease", - leaseCoordinator.workerIdentifier(), leaseKey); + log.info( + "Worker {} could not update checkpoint for shard {} because it does not hold the lease", + leaseCoordinator.workerIdentifier(), + leaseKey); return false; } @@ -140,12 +149,18 @@ public class DynamoDBCheckpointer implements Checkpointer { return leaseCoordinator.updateLease(lease, concurrencyToken, operation, leaseKey); } - boolean prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, UUID concurrencyToken, byte[] pendingCheckpointState) + boolean prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + UUID concurrencyToken, + byte[] pendingCheckpointState) throws DependencyException, InvalidStateException, ProvisionedThroughputException { Lease lease = leaseCoordinator.getCurrentlyHeldLease(leaseKey); if (lease == null) { - log.info("Worker {} could not prepare checkpoint for shard {} because it does not hold the lease", - leaseCoordinator.workerIdentifier(), leaseKey); + log.info( + "Worker {} could not prepare checkpoint for shard {} because it does not hold the lease", + leaseCoordinator.workerIdentifier(), + leaseKey); return false; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ArnUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ArnUtil.java new file mode 100644 index 00000000..92037698 --- /dev/null +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ArnUtil.java @@ -0,0 +1,32 @@ +package software.amazon.kinesis.common; + +import lombok.NonNull; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; +import software.amazon.kinesis.annotations.KinesisClientInternalApi; + +import static software.amazon.awssdk.services.kinesis.KinesisAsyncClient.SERVICE_NAME; + +@KinesisClientInternalApi +public final class ArnUtil { + private static final String STREAM_RESOURCE_PREFIX = "stream/"; + + /** + * Construct a Kinesis stream ARN. + * + * @param region The region the stream exists in. + * @param accountId The account the stream belongs to. + * @param streamName The name of the stream. 
+ * @return The {@link Arn} of the Kinesis stream. + */ + public static Arn constructStreamArn( + @NonNull final Region region, @NonNull final String accountId, @NonNull final String streamName) { + return Arn.builder() + .partition(region.metadata().partition().id()) + .service(SERVICE_NAME) + .region(region.id()) + .accountId(accountId) + .resource(STREAM_RESOURCE_PREFIX + streamName) + .build(); + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java index edb6de2e..d7f33c23 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java @@ -15,10 +15,8 @@ package software.amazon.kinesis.common; - public class CommonCalculations { - /** * Convenience method for calculating renewer intervals in milliseconds. * diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java index a5bbfebe..2838d62d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java @@ -19,12 +19,12 @@ import java.util.function.Function; import lombok.EqualsAndHashCode; import lombok.Getter; +import lombok.NonNull; import lombok.Setter; import lombok.ToString; -import org.apache.commons.lang3.StringUtils; - -import lombok.NonNull; import lombok.experimental.Accessors; +import org.apache.commons.lang3.StringUtils; +import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -34,9 +34,9 @@ import software.amazon.kinesis.coordinator.CoordinatorConfig; import software.amazon.kinesis.leases.LeaseManagementConfig; import software.amazon.kinesis.lifecycle.LifecycleConfig; import software.amazon.kinesis.metrics.MetricsConfig; +import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.ProcessorConfig; import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; import software.amazon.kinesis.retrieval.RetrievalConfig; @@ -44,7 +44,10 @@ import software.amazon.kinesis.retrieval.RetrievalConfig; /** * This Builder is useful to create all configurations for the KCL with default values. */ -@Getter @Setter @ToString @EqualsAndHashCode +@Getter +@Setter +@ToString +@EqualsAndHashCode @Accessors(fluent = true) public class ConfigsBuilder { /** @@ -128,7 +131,7 @@ public class ConfigsBuilder { } /** - * Constructor to initialize ConfigsBuilder for a single stream. + * Constructor to initialize ConfigsBuilder for a single stream identified by name. 
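A minimal usage sketch combining the ArnUtil helper above with the Arn-based ConfigsBuilder overload introduced below; the region, account id, stream name, application name, and worker id are hypothetical placeholders, and the AWS clients are assumed to be built elsewhere:

    // Hypothetical values; not part of this change.
    Arn streamArn = ArnUtil.constructStreamArn(Region.US_WEST_2, "123456789012", "my-stream");
    // => arn:aws:kinesis:us-west-2:123456789012:stream/my-stream
    ConfigsBuilder configsBuilder = new ConfigsBuilder(
            streamArn, "my-app", kinesisClient, dynamoDbClient, cloudWatchClient, "worker-1", processorFactory);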
* * @param streamName * @param applicationName * @param kinesisClient * @param dynamoDBClient * @param cloudWatchClient * @param workerIdentifier * @param shardRecordProcessorFactory */ - public ConfigsBuilder(@NonNull String streamName, @NonNull String applicationName, - @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient, - @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier, + public ConfigsBuilder( + @NonNull String streamName, + @NonNull String applicationName, + @NonNull KinesisAsyncClient kinesisClient, + @NonNull DynamoDbAsyncClient dynamoDBClient, + @NonNull CloudWatchAsyncClient cloudWatchClient, + @NonNull String workerIdentifier, @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { - this(new SingleStreamTracker(streamName), + this( + new SingleStreamTracker(streamName), + applicationName, + kinesisClient, + dynamoDBClient, + cloudWatchClient, + workerIdentifier, + shardRecordProcessorFactory); + } + + /** + * Constructor to initialize ConfigsBuilder for a single stream identified by {@link Arn}. + * + * @param streamArn + * @param applicationName + * @param kinesisClient + * @param dynamoDBClient + * @param cloudWatchClient + * @param workerIdentifier + * @param shardRecordProcessorFactory + */ + public ConfigsBuilder( + @NonNull Arn streamArn, + @NonNull String applicationName, + @NonNull KinesisAsyncClient kinesisClient, + @NonNull DynamoDbAsyncClient dynamoDBClient, + @NonNull CloudWatchAsyncClient cloudWatchClient, + @NonNull String workerIdentifier, + @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { + this( + new SingleStreamTracker(streamArn), applicationName, kinesisClient, dynamoDBClient, cloudWatchClient, @@ -162,9 +199,13 @@ * @param workerIdentifier * @param shardRecordProcessorFactory */ - public ConfigsBuilder(@NonNull StreamTracker streamTracker, @NonNull String applicationName, - @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient, - @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier, + public ConfigsBuilder( + @NonNull StreamTracker streamTracker, + @NonNull String applicationName, + @NonNull KinesisAsyncClient kinesisClient, + @NonNull DynamoDbAsyncClient dynamoDBClient, + @NonNull CloudWatchAsyncClient cloudWatchClient, + @NonNull String workerIdentifier, @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { this.applicationName = applicationName; this.kinesisClient = kinesisClient; @@ -184,8 +225,11 @@ public void streamTracker(StreamTracker streamTracker) { this.streamTracker = streamTracker; - this.appStreamTracker = DeprecationUtils.convert(streamTracker, - singleStreamTracker -> singleStreamTracker.streamConfigList().get(0).streamIdentifier().streamName()); + this.appStreamTracker = DeprecationUtils.convert(streamTracker, singleStreamTracker -> singleStreamTracker + .streamConfigList() + .get(0) + .streamIdentifier() + .streamName()); } /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java index 5d8782e0..73ff0bff 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java @@ -39,8 +39,7 @@ public final class DeprecationUtils { */ @Deprecated public static <T> Either<MultiStreamTracker, T> convert( -
StreamTracker streamTracker, - Function<SingleStreamTracker, T> converter) { + StreamTracker streamTracker, Function<SingleStreamTracker, T> converter) { if (streamTracker instanceof MultiStreamTracker) { return Either.left((MultiStreamTracker) streamTracker); } else if (streamTracker instanceof SingleStreamTracker) { @@ -49,5 +48,4 @@ throw new IllegalArgumentException("Unhandled StreamTracker: " + streamTracker); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java index 0f350c07..05e8e58d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java @@ -15,12 +15,12 @@ package software.amazon.kinesis.common; -import org.slf4j.Logger; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; - import java.time.Duration; import java.time.Instant; +import org.slf4j.Logger; +import software.amazon.kinesis.annotations.KinesisClientInternalApi; + import static software.amazon.kinesis.lifecycle.ShardConsumer.MAX_TIME_BETWEEN_REQUEST_RESPONSE; @KinesisClientInternalApi @@ -32,18 +32,22 @@ * @param enqueueTimestamp of the event submitted to the executor service * @param log Slf4j Logger from RecordPublisher to log the events */ - public static void takeDelayedDeliveryActionIfRequired(String resourceIdentifier, Instant enqueueTimestamp, Logger log) { - final long durationBetweenEnqueueAndAckInMillis = Duration - .between(enqueueTimestamp, Instant.now()).toMillis(); + public static void takeDelayedDeliveryActionIfRequired( + String resourceIdentifier, Instant enqueueTimestamp, Logger log) { + final long durationBetweenEnqueueAndAckInMillis = + Duration.between(enqueueTimestamp, Instant.now()).toMillis(); if (durationBetweenEnqueueAndAckInMillis > 11000) { // The above condition logs the warn msg if the delivery time exceeds 11 seconds. log.warn( "{}: Record delivery time to shard consumer is high at {} millis. Check the ExecutorStateEvent logs" + " to see the state of the executor service. Also check if the RecordProcessor's processing " + "time is high.
", - resourceIdentifier, durationBetweenEnqueueAndAckInMillis); + resourceIdentifier, + durationBetweenEnqueueAndAckInMillis); } else if (log.isDebugEnabled()) { - log.debug("{}: Record delivery time to shard consumer is {} millis", resourceIdentifier, + log.debug( + "{}: Record delivery time to shard consumer is {} millis", + resourceIdentifier, durationBetweenEnqueueAndAckInMillis); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java index 9410d6fd..3c104d8d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java @@ -31,5 +31,4 @@ public class FutureUtils { throw te; } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java index d2540073..8d52ec90 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java @@ -15,26 +15,30 @@ package software.amazon.kinesis.common; +import java.math.BigInteger; + import lombok.NonNull; import lombok.Value; import lombok.experimental.Accessors; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.kinesis.model.HashKeyRange; -import java.math.BigInteger; - -@Value @Accessors(fluent = true) /** * Lease POJO to hold the starting hashkey range and ending hashkey range of kinesis shards. */ +@Accessors(fluent = true) +@Value public class HashKeyRangeForLease { private final BigInteger startingHashKey; private final BigInteger endingHashKey; public HashKeyRangeForLease(BigInteger startingHashKey, BigInteger endingHashKey) { - Validate.isTrue(startingHashKey.compareTo(endingHashKey) < 0, - "StartingHashKey %s must be less than EndingHashKey %s ", startingHashKey, endingHashKey); + Validate.isTrue( + startingHashKey.compareTo(endingHashKey) < 0, + "StartingHashKey %s must be less than EndingHashKey %s ", + startingHashKey, + endingHashKey); this.startingHashKey = startingHashKey; this.endingHashKey = endingHashKey; } @@ -64,11 +68,15 @@ public class HashKeyRangeForLease { * @param endingHashKeyStr * @return HashKeyRangeForLease */ - public static HashKeyRangeForLease deserialize(@NonNull String startingHashKeyStr, @NonNull String endingHashKeyStr) { + public static HashKeyRangeForLease deserialize( + @NonNull String startingHashKeyStr, @NonNull String endingHashKeyStr) { final BigInteger startingHashKey = new BigInteger(startingHashKeyStr); final BigInteger endingHashKey = new BigInteger(endingHashKeyStr); - Validate.isTrue(startingHashKey.compareTo(endingHashKey) < 0, - "StartingHashKey %s must be less than EndingHashKey %s ", startingHashKeyStr, endingHashKeyStr); + Validate.isTrue( + startingHashKey.compareTo(endingHashKey) < 0, + "StartingHashKey %s must be less than EndingHashKey %s ", + startingHashKeyStr, + endingHashKeyStr); return new HashKeyRangeForLease(startingHashKey, endingHashKey); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java index b3bedd88..5c512933 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java @@ -14,16 +14,17 @@ */ package software.amazon.kinesis.common; +import java.util.Date; + import lombok.EqualsAndHashCode; import lombok.ToString; -import java.util.Date; - /** * Class that houses the entities needed to specify the position in the stream from where a new application should * start. */ -@ToString @EqualsAndHashCode +@ToString +@EqualsAndHashCode public class InitialPositionInStreamExtended { private final InitialPositionInStream position; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java index 45bd88e2..c2f3ca7d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java @@ -15,14 +15,14 @@ package software.amazon.kinesis.common; +import java.time.Duration; + import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.nio.netty.Http2Configuration; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder; -import java.time.Duration; - /** * Utility to setup KinesisAsyncClient to be used with KCL. */ @@ -42,9 +42,12 @@ } public static KinesisAsyncClientBuilder adjustKinesisClientBuilder(KinesisAsyncClientBuilder builder) { - return builder.httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(Integer.MAX_VALUE) - .http2Configuration(Http2Configuration.builder().initialWindowSize(INITIAL_WINDOW_SIZE_BYTES) - .healthCheckPingPeriod(Duration.ofMillis(HEALTH_CHECK_PING_PERIOD_MILLIS)).build()) + return builder.httpClientBuilder(NettyNioAsyncHttpClient.builder() + .maxConcurrency(Integer.MAX_VALUE) + .http2Configuration(Http2Configuration.builder() + .initialWindowSize(INITIAL_WINDOW_SIZE_BYTES) + .healthCheckPingPeriod(Duration.ofMillis(HEALTH_CHECK_PING_PERIOD_MILLIS)) + .build()) .protocol(Protocol.HTTP2)); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java index 52e16f3e..9ef43b8e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java @@ -63,12 +63,11 @@ @SuppressWarnings("unchecked") private static <T extends AwsRequest.Builder> T appendUserAgent(final T builder) { - return (T) builder - .overrideConfiguration( - AwsRequestOverrideConfiguration.builder() - .addApiName(ApiName.builder().name(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT) - .version(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT_VERSION).build()) + return (T) builder.overrideConfiguration(AwsRequestOverrideConfiguration.builder() + .addApiName(ApiName.builder() + .name(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT) + .version(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT_VERSION) + .build()) .build()); } - } diff --git
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java index b2582d45..6c29c76b 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java @@ -24,7 +24,7 @@ */ @Builder @Getter -@Accessors(fluent=true) +@Accessors(fluent = true) public class LeaseCleanupConfig { /** * Interval at which to run lease cleanup thread. diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java index 9f511123..108a12d4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java @@ -15,11 +15,11 @@ package software.amazon.kinesis.common; -import lombok.experimental.Accessors; - import java.util.Optional; -@Accessors(fluent=true) +import lombok.experimental.Accessors; + +@Accessors(fluent = true) public class RequestDetails { /** @@ -62,6 +62,4 @@ public String toString() { return String.format("request id - %s, timestamp - %s", getRequestId(), getTimestamp()); } - } - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java index b1057f13..95772008 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java @@ -15,15 +15,20 @@ package software.amazon.kinesis.common; +import lombok.AllArgsConstructor; import lombok.Data; +import lombok.NonNull; +import lombok.RequiredArgsConstructor; import lombok.experimental.Accessors; +@AllArgsConstructor +@RequiredArgsConstructor @Data @Accessors(fluent = true) public class StreamConfig { + @NonNull private final StreamIdentifier streamIdentifier; + private final InitialPositionInStreamExtended initialPositionInStreamExtended; private String consumerArn; } - - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java index 1259a609..2070a535 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java @@ -15,45 +15,74 @@ package software.amazon.kinesis.common; -import com.google.common.base.Joiner; +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import lombok.AccessLevel; +import lombok.Builder; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; import lombok.experimental.Accessors; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.utils.Validate; -import java.util.Optional; -import java.util.regex.Pattern; - -@EqualsAndHashCode @Getter @Accessors(fluent = true) +@Builder(access = AccessLevel.PRIVATE) +@EqualsAndHashCode +@Getter +@Accessors(fluent = true) public class StreamIdentifier { - private final Optional<String>
accountIdOptional; + + @Builder.Default + private final Optional<String> accountIdOptional = Optional.empty(); + + @NonNull private final String streamName; - private final Optional<Long> streamCreationEpochOptional; - private static final String DELIMITER = ":"; - private static final Pattern PATTERN = Pattern.compile(".*" + ":" + ".*" + ":" + "[0-9]*"); + @Builder.Default + private final Optional<Long> streamCreationEpochOptional = Optional.empty(); - private StreamIdentifier(@NonNull String accountId, @NonNull String streamName, @NonNull Long streamCreationEpoch) { - this.accountIdOptional = Optional.of(accountId); - this.streamName = streamName; - this.streamCreationEpochOptional = Optional.of(streamCreationEpoch); - } + @Builder.Default + @EqualsAndHashCode.Exclude + private final Optional<Arn> streamArnOptional = Optional.empty(); - private StreamIdentifier(@NonNull String streamName) { - this.accountIdOptional = Optional.empty(); - this.streamName = streamName; - this.streamCreationEpochOptional = Optional.empty(); - } + /** + * Pattern for a serialized {@link StreamIdentifier}. The valid format is + * {@code <accountId>:<streamName>:<creationEpoch>}. + */ + private static final Pattern STREAM_IDENTIFIER_PATTERN = + Pattern.compile("(?<accountId>[0-9]+):(?<streamName>[^:]+):(?<creationEpoch>[0-9]+)"); + + /** + * Pattern for a stream ARN. The valid format is + * {@code arn:aws:kinesis:<region>:<accountId>:stream/<streamName>} + * where {@code region} is the id representation of a {@link Region}. + */ + private static final Pattern STREAM_ARN_PATTERN = Pattern.compile( + "arn:aws[^:]*:kinesis:(?<region>[-a-z0-9]+):(?<accountId>[0-9]{12}):stream/(?<streamName>.+)"); /** * Serialize the current StreamIdentifier instance. - * @return + * + * @return a String of {@code account:stream:creationEpoch} in multi-stream mode + * or {@link #streamName} in single-stream mode. */ public String serialize() { - return accountIdOptional.isPresent() ? - Joiner.on(DELIMITER).join(accountIdOptional.get(), streamName, streamCreationEpochOptional.get()) : - streamName; + if (!streamCreationEpochOptional.isPresent()) { + // creation epoch is expected to be empty in single-stream mode + return streamName; + } + + final char delimiter = ':'; + final StringBuilder sb = new StringBuilder() + .append(accountIdOptional.get()) + .append(delimiter) + .append(streamName) + .append(delimiter) + .append(streamCreationEpochOptional.get()); + return sb.toString(); } @Override @@ -62,27 +91,105 @@ } /** - * Create a multi stream instance for StreamIdentifier from serialized stream identifier.
- * The serialized stream identifier should be of the format account:stream:creationepoch - * @param streamIdentifierSer - * @return StreamIdentifier + * Create a multi stream instance for StreamIdentifier from serialized stream identifier + * of format {@link #STREAM_IDENTIFIER_PATTERN} + * + * @param streamIdentifierSer a String of {@code account:stream:creationEpoch} + * @return StreamIdentifier with {@link #accountIdOptional} and {@link #streamCreationEpochOptional} present */ public static StreamIdentifier multiStreamInstance(String streamIdentifierSer) { - if (PATTERN.matcher(streamIdentifierSer).matches()) { - final String[] split = streamIdentifierSer.split(DELIMITER); - return new StreamIdentifier(split[0], split[1], Long.parseLong(split[2])); - } else { - throw new IllegalArgumentException("Unable to deserialize StreamIdentifier from " + streamIdentifierSer); + final Matcher matcher = STREAM_IDENTIFIER_PATTERN.matcher(streamIdentifierSer); + if (matcher.matches()) { + final String accountId = matcher.group("accountId"); + final String streamName = matcher.group("streamName"); + final Long creationEpoch = Long.valueOf(matcher.group("creationEpoch")); + + validateCreationEpoch(creationEpoch); + + return StreamIdentifier.builder() + .accountIdOptional(Optional.of(accountId)) + .streamName(streamName) + .streamCreationEpochOptional(Optional.of(creationEpoch)) + .build(); } + + throw new IllegalArgumentException("Unable to deserialize StreamIdentifier from " + streamIdentifierSer); + } + + /** + * Create a multi stream instance for StreamIdentifier from stream {@link Arn}. + * + * @param streamArn an {@link Arn} of format {@link #STREAM_ARN_PATTERN} + * @param creationEpoch Creation epoch of the stream. This value will + * reflect in the lease key and is assumed to be correct. (KCL could + * verify, but that creates issues for both bootstrapping and, with large + * KCL applications, API throttling against DescribeStreamSummary.) + * If this epoch is reused for two identically-named streams in the same + * account -- such as deleting and recreating a stream -- then KCL will + * be unable to differentiate leases between the old and new stream + * since the lease keys collide on this creation epoch. + * @return StreamIdentifier with {@link #accountIdOptional}, {@link #streamCreationEpochOptional}, + * and {@link #streamArnOptional} present + */ + public static StreamIdentifier multiStreamInstance(Arn streamArn, long creationEpoch) { + validateArn(streamArn); + validateCreationEpoch(creationEpoch); + + return StreamIdentifier.builder() + .accountIdOptional(streamArn.accountId()) + .streamName(streamArn.resource().resource()) + .streamCreationEpochOptional(Optional.of(creationEpoch)) + .streamArnOptional(Optional.of(streamArn)) + .build(); } /** * Create a single stream instance for StreamIdentifier from stream name. - * @param streamName - * @return StreamIdentifier + * + * @param streamName stream name of a Kinesis stream */ public static StreamIdentifier singleStreamInstance(String streamName) { Validate.notEmpty(streamName, "StreamName should not be empty"); - return new StreamIdentifier(streamName); + + return StreamIdentifier.builder().streamName(streamName).build(); + } + + /** + * Create a single stream instance for StreamIdentifier from AWS Kinesis stream {@link Arn}. 
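A brief sketch of how the StreamIdentifier factory methods above fit together; the ARN, account id, stream name, and creation epoch are hypothetical:

    Arn streamArn = Arn.fromString("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream");
    StreamIdentifier fromArn = StreamIdentifier.multiStreamInstance(streamArn, 1680000000L);
    fromArn.serialize(); // => "123456789012:my-stream:1680000000" (the multi-stream lease key)
    // Deserializing that lease key yields an equal instance, since streamArnOptional is excluded from equality.
    StreamIdentifier fromLeaseKey = StreamIdentifier.multiStreamInstance("123456789012:my-stream:1680000000");
    assert fromArn.equals(fromLeaseKey);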
+ * + * @param streamArn AWS ARN of a Kinesis stream + * @return StreamIdentifier with {@link #accountIdOptional} and {@link #streamArnOptional} present + */ + public static StreamIdentifier singleStreamInstance(Arn streamArn) { + validateArn(streamArn); + + return StreamIdentifier.builder() + .accountIdOptional(streamArn.accountId()) + .streamName(streamArn.resource().resource()) + .streamArnOptional(Optional.of(streamArn)) + .build(); + } + + /** + * Verify the streamArn follows the appropriate formatting. + * Throw an exception if it does not. + * @param streamArn + */ + public static void validateArn(Arn streamArn) { + if (!STREAM_ARN_PATTERN.matcher(streamArn.toString()).matches() + || !streamArn.region().isPresent()) { + throw new IllegalArgumentException("Invalid streamArn " + streamArn); + } + } + + /** + * Verify creationEpoch is greater than 0. + * Throw an exception if it is not. + * @param creationEpoch + */ + private static void validateCreationEpoch(long creationEpoch) { + if (creationEpoch <= 0) { + throw new IllegalArgumentException("Creation epoch must be > 0; received " + creationEpoch); + } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java index d5c4dc13..e1835228 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java @@ -96,5 +96,4 @@ public class CoordinatorConfig { *
<p>
    Default value: 1000L
</p>
*/ private long schedulerInitializationBackoffTimeMillis = 1000L; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java new file mode 100644 index 00000000..d4057999 --- /dev/null +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java @@ -0,0 +1,37 @@ +package software.amazon.kinesis.coordinator; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.common.StreamIdentifier; + +/** + * This class stores an in-memory set of streams that no longer exist (i.e., have been deleted) and need to be + * cleaned up from KCL's in-memory state. + */ +@Slf4j +public class DeletedStreamListProvider { + + private final Set<StreamIdentifier> deletedStreams; + + public DeletedStreamListProvider() { + deletedStreams = ConcurrentHashMap.newKeySet(); + } + + public void add(StreamIdentifier streamIdentifier) { + log.info("Added {}", streamIdentifier); + deletedStreams.add(streamIdentifier); + } + + /** + * Returns and empties the current set of deleted streams. + * @return set of deleted streams + */ + public Set<StreamIdentifier> purgeAllDeletedStream() { + final Set<StreamIdentifier> response = new HashSet<>(deletedStreams); + deletedStreams.removeAll(response); + return response; + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java index b06dba39..4c7f25da 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java @@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BooleanSupplier; import java.util.stream.Collectors; + import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.kinesis.leases.Lease; @@ -45,8 +46,7 @@ * This ensures redundancy for shard-sync during host failures. */ @Slf4j -class DeterministicShuffleShardSyncLeaderDecider - implements LeaderDecider { +class DeterministicShuffleShardSyncLeaderDecider implements LeaderDecider { // Fixed seed so that the shuffle order is preserved across workers static final int DETERMINISTIC_SHUFFLE_SEED = 1947; @@ -67,13 +67,11 @@ * @param leaderElectionThreadPool Thread-pool to be used for leaderElection. * @param numPeriodicShardSyncWorkers Number of leaders that will be elected to perform periodic shard syncs.
*/ - DeterministicShuffleShardSyncLeaderDecider(LeaseRefresher leaseRefresher, - ScheduledExecutorService leaderElectionThreadPool, - int numPeriodicShardSyncWorkers) { - this(leaseRefresher, - leaderElectionThreadPool, - numPeriodicShardSyncWorkers, - new ReentrantReadWriteLock()); + DeterministicShuffleShardSyncLeaderDecider( + LeaseRefresher leaseRefresher, + ScheduledExecutorService leaderElectionThreadPool, + int numPeriodicShardSyncWorkers) { + this(leaseRefresher, leaderElectionThreadPool, numPeriodicShardSyncWorkers, new ReentrantReadWriteLock()); } /** @@ -82,10 +80,11 @@ * @param numPeriodicShardSyncWorkers Number of leaders that will be elected to perform periodic shard syncs. * @param readWriteLock Mechanism to lock for reading and writing of critical components */ - DeterministicShuffleShardSyncLeaderDecider(LeaseRefresher leaseRefresher, - ScheduledExecutorService leaderElectionThreadPool, - int numPeriodicShardSyncWorkers, - ReadWriteLock readWriteLock) { + DeterministicShuffleShardSyncLeaderDecider( + LeaseRefresher leaseRefresher, + ScheduledExecutorService leaderElectionThreadPool, + int numPeriodicShardSyncWorkers, + ReadWriteLock readWriteLock) { this.leaseRefresher = leaseRefresher; this.leaderElectionThreadPool = leaderElectionThreadPool; this.numPeriodicShardSyncWorkers = numPeriodicShardSyncWorkers; @@ -101,8 +100,12 @@ try { log.debug("Started leader election at: " + Instant.now()); List<Lease> leases = leaseRefresher.listLeases(); - List<String> uniqueHosts = leases.stream().map(Lease::leaseOwner) - .filter(owner -> owner != null).distinct().sorted().collect(Collectors.toList()); + List<String> uniqueHosts = leases.stream() + .map(Lease::leaseOwner) + .filter(owner -> owner != null) + .distinct() + .sorted() + .collect(Collectors.toList()); Collections.shuffle(uniqueHosts, new Random(DETERMINISTIC_SHUFFLE_SEED)); int numShardSyncWorkers = Math.min(uniqueHosts.size(), numPeriodicShardSyncWorkers); @@ -137,8 +140,11 @@ // The first run will be after a minute. // We don't need jitter since it is scheduled with a fixed delay and time taken to scan leases // will be different at different times and on different hosts/workers.
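Because the shuffle uses a fixed seed, every worker computes the same ordering from the same set of lease owners and therefore agrees on the leader set without any coordination. A condensed sketch of the selection performed in electLeaders above; the hostnames and worker count are hypothetical:

    List<String> uniqueHosts = new ArrayList<>(Arrays.asList("host-a", "host-b", "host-c")); // distinct, sorted owners
    Collections.shuffle(uniqueHosts, new Random(DETERMINISTIC_SHUFFLE_SEED)); // identical order on every worker
    int numShardSyncWorkers = Math.min(uniqueHosts.size(), 2); // 2 = hypothetical numPeriodicShardSyncWorkers
    Set<String> leaders = new HashSet<>(uniqueHosts.subList(0, numShardSyncWorkers));
    boolean isLeader = leaders.contains("host-a"); // each worker checks its own id independently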
- leaderElectionThreadPool.scheduleWithFixedDelay(this::electLeaders, ELECTION_INITIAL_DELAY_MILLIS, - ELECTION_SCHEDULING_INTERVAL_MILLIS, TimeUnit.MILLISECONDS); + leaderElectionThreadPool.scheduleWithFixedDelay( + this::electLeaders, + ELECTION_INITIAL_DELAY_MILLIS, + ELECTION_SCHEDULING_INTERVAL_MILLIS, + TimeUnit.MILLISECONDS); } return executeConditionCheckWithReadLock(() -> isWorkerLeaderForShardSync(workerId)); @@ -152,7 +158,8 @@ class DeterministicShuffleShardSyncLeaderDecider log.info("Successfully stopped leader election on the worker"); } else { leaderElectionThreadPool.shutdownNow(); - log.info(String.format("Stopped leader election thread after awaiting termination for %d milliseconds", + log.info(String.format( + "Stopped leader election thread after awaiting termination for %d milliseconds", AWAIT_TERMINATION_MILLIS)); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java index 316313aa..8fe61a94 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java @@ -15,11 +15,11 @@ package software.amazon.kinesis.coordinator; +import java.util.concurrent.ExecutorService; + import lombok.NoArgsConstructor; import software.amazon.kinesis.leases.LeaseCoordinator; -import java.util.concurrent.ExecutorService; - /** * Creates {@link DiagnosticEvent}s for logging and visibility */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java index 33c83a5c..b0c0b55d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java @@ -15,15 +15,15 @@ package software.amazon.kinesis.coordinator; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; + import lombok.Getter; import lombok.ToString; import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.leases.LeaseCoordinator; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadPoolExecutor; - @Getter @ToString(exclude = "isThreadPoolExecutor") @Slf4j diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java index 64b201af..d805d6cd 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java @@ -14,22 +14,22 @@ */ package software.amazon.kinesis.coordinator; +import java.util.concurrent.CountDownLatch; + +import lombok.Builder; import lombok.Data; import lombok.experimental.Accessors; -import java.util.concurrent.CountDownLatch; - @Data +@Builder @Accessors(fluent = true) class GracefulShutdownContext { private final CountDownLatch shutdownCompleteLatch; private final CountDownLatch notificationCompleteLatch; + private final CountDownLatch finalShutdownLatch; private final 
Scheduler scheduler; - static GracefulShutdownContext SHUTDOWN_ALREADY_COMPLETED = new GracefulShutdownContext(null, null, null); - - boolean isShutdownAlreadyCompleted() { + boolean isRecordProcessorShutdownComplete() { return shutdownCompleteLatch == null && notificationCompleteLatch == null && scheduler == null; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java index 260e25d6..af3a2dca 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java @@ -14,20 +14,27 @@ */ package software.amazon.kinesis.coordinator; - -import lombok.extern.slf4j.Slf4j; - import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import lombok.extern.slf4j.Slf4j; + class GracefulShutdownCoordinator { + /** + * Arbitrary wait time for the worker's final shutdown. + */ + private static final long FINAL_SHUTDOWN_WAIT_TIME_SECONDS = 60L; + CompletableFuture<Boolean> startGracefulShutdown(Callable<Boolean> shutdownCallable) { CompletableFuture<Boolean> cf = new CompletableFuture<>(); CompletableFuture.runAsync(() -> { - try { cf.complete(shutdownCallable.call()); } - catch(Throwable ex) { cf.completeExceptionally(ex); } + try { + cf.complete(shutdownCallable.call()); + } catch (Throwable ex) { + cf.completeExceptionally(ex); + } }); return cf; } @@ -45,7 +52,8 @@ } private boolean isWorkerShutdownComplete(GracefulShutdownContext context) { - return context.scheduler().shutdownComplete() || context.scheduler().shardInfoShardConsumerMap().isEmpty(); + return context.scheduler().shutdownComplete() + || context.scheduler().shardInfoShardConsumerMap().isEmpty(); } private String awaitingLogMessage(GracefulShutdownContext context) { @@ -62,7 +70,18 @@ return String.format("Waiting for %d record processors to complete final shutdown", outstanding); } + /** + * Used to wait for the worker's final shutdown to complete before returning the future for graceful shutdown. + * @return true if the final shutdown is successful, false otherwise. + */ + private boolean waitForFinalShutdown(GracefulShutdownContext context) throws InterruptedException { + return context.finalShutdownLatch().await(FINAL_SHUTDOWN_WAIT_TIME_SECONDS, TimeUnit.SECONDS); + } + private boolean waitForRecordProcessors(GracefulShutdownContext context) { + if (context.isRecordProcessorShutdownComplete()) { + return true; + } // // Awaiting for all ShardConsumer/RecordProcessors to be notified that a shutdown has been requested. @@ -76,12 +95,14 @@ throw new InterruptedException(); } log.info(awaitingLogMessage(context)); - if (workerShutdownWithRemaining(context.shutdownCompleteLatch().getCount(), context)) { + if (workerShutdownWithRemaining( + context.shutdownCompleteLatch().getCount(), context)) { return false; } } } catch (InterruptedException ie) { - log.warn("Interrupted while waiting for notification complete, terminating shutdown. {}", + log.warn( + "Interrupted while waiting for notification complete, terminating shutdown.
{}", awaitingLogMessage(context)); return false; } @@ -113,12 +134,14 @@ class GracefulShutdownCoordinator { throw new InterruptedException(); } log.info(awaitingFinalShutdownMessage(context)); - if (workerShutdownWithRemaining(context.shutdownCompleteLatch().getCount(), context)) { + if (workerShutdownWithRemaining( + context.shutdownCompleteLatch().getCount(), context)) { return false; } } } catch (InterruptedException ie) { - log.warn("Interrupted while waiting for shutdown completion, terminating shutdown. {}", + log.warn( + "Interrupted while waiting for shutdown completion, terminating shutdown. {}", awaitingFinalShutdownMessage(context)); return false; } @@ -136,9 +159,12 @@ class GracefulShutdownCoordinator { private boolean workerShutdownWithRemaining(long outstanding, GracefulShutdownContext context) { if (isWorkerShutdownComplete(context)) { if (outstanding != 0) { - log.info("Shutdown completed, but shutdownCompleteLatch still had outstanding {} with a current" - + " value of {}. shutdownComplete: {} -- Consumer Map: {}", outstanding, - context.shutdownCompleteLatch().getCount(), context.scheduler().shutdownComplete(), + log.info( + "Shutdown completed, but shutdownCompleteLatch still had outstanding {} with a current" + + " value of {}. shutdownComplete: {} -- Consumer Map: {}", + outstanding, + context.shutdownCompleteLatch().getCount(), + context.scheduler().shutdownComplete(), context.scheduler().shardInfoShardConsumerMap().size()); return true; } @@ -148,14 +174,13 @@ class GracefulShutdownCoordinator { @Override public Boolean call() throws Exception { - GracefulShutdownContext context; try { - context = startWorkerShutdown.call(); + final GracefulShutdownContext context = startWorkerShutdown.call(); + return waitForRecordProcessors(context) && waitForFinalShutdown(context); } catch (Exception ex) { log.warn("Caught exception while requesting initial worker shutdown.", ex); throw ex; } - return context.isShutdownAlreadyCompleted() || waitForRecordProcessors(context); } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java index 3e0432cb..8accdfcb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java @@ -16,15 +16,11 @@ package software.amazon.kinesis.coordinator; public class NoOpWorkerStateChangeListener implements WorkerStateChangeListener { - /** - * Empty constructor for NoOp Worker State Change Listener - */ - public NoOpWorkerStateChangeListener() { + /** + * Empty constructor for NoOp Worker State Change Listener + */ + public NoOpWorkerStateChangeListener() {} - } - - @Override - public void onWorkerStateChange(WorkerState newState) { - - } + @Override + public void onWorkerStateChange(WorkerState newState) {} } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java index a885c4d9..f0133ec8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java @@ -14,8 +14,27 @@ */ package 
software.amazon.kinesis.coordinator; +import java.io.Serializable; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; + import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ComparisonChain; +import lombok.AccessLevel; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; @@ -26,6 +45,7 @@ import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.awssdk.services.kinesis.model.Shard; import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.HashKeyRangeForLease; import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; @@ -44,22 +64,6 @@ import software.amazon.kinesis.metrics.MetricsLevel; import software.amazon.kinesis.metrics.MetricsScope; import software.amazon.kinesis.metrics.MetricsUtil; -import java.io.Serializable; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; - import static software.amazon.kinesis.common.HashKeyRangeForLease.fromHashKeyRange; /** @@ -69,12 +73,16 @@ @Getter @EqualsAndHashCode @Slf4j +@KinesisClientInternalApi class PeriodicShardSyncManager { private static final long INITIAL_DELAY = 60 * 1000L; + @VisibleForTesting static final BigInteger MIN_HASH_KEY = BigInteger.ZERO; + @VisibleForTesting static final BigInteger MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE); + static final String PERIODIC_SHARD_SYNC_MANAGER = "PeriodicShardSyncManager"; private final Map<StreamIdentifier, HashRangeHoleTracker> hashRangeHoleTrackerMap = new HashMap<>(); @@ -83,31 +91,58 @@ private final LeaseRefresher leaseRefresher; private final Map<StreamIdentifier, StreamConfig> currentStreamConfigMap; private final Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider; + private final Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap; private final ScheduledExecutorService shardSyncThreadPool; private final boolean isMultiStreamingMode; private final MetricsFactory metricsFactory; private final long leasesRecoveryAuditorExecutionFrequencyMillis; private final int leasesRecoveryAuditorInconsistencyConfidenceThreshold; + + @Getter(AccessLevel.NONE) + private final AtomicBoolean leaderSynced; + private boolean isRunning; - PeriodicShardSyncManager(String workerId, LeaderDecider leaderDecider, LeaseRefresher leaseRefresher, - Map<StreamIdentifier, StreamConfig> currentStreamConfigMap, - Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider, boolean isMultiStreamingMode, - MetricsFactory metricsFactory, - long leasesRecoveryAuditorExecutionFrequencyMillis,
- int leasesRecoveryAuditorInconsistencyConfidenceThreshold) { - this(workerId, leaderDecider, leaseRefresher, currentStreamConfigMap, shardSyncTaskManagerProvider, - Executors.newSingleThreadScheduledExecutor(), isMultiStreamingMode, metricsFactory, - leasesRecoveryAuditorExecutionFrequencyMillis, leasesRecoveryAuditorInconsistencyConfidenceThreshold); - } - - PeriodicShardSyncManager(String workerId, LeaderDecider leaderDecider, LeaseRefresher leaseRefresher, + PeriodicShardSyncManager( + String workerId, + LeaderDecider leaderDecider, + LeaseRefresher leaseRefresher, Map<StreamIdentifier, StreamConfig> currentStreamConfigMap, Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider, - ScheduledExecutorService shardSyncThreadPool, boolean isMultiStreamingMode, + Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap, + boolean isMultiStreamingMode, MetricsFactory metricsFactory, long leasesRecoveryAuditorExecutionFrequencyMillis, - int leasesRecoveryAuditorInconsistencyConfidenceThreshold) { + int leasesRecoveryAuditorInconsistencyConfidenceThreshold, + AtomicBoolean leaderSynced) { + this( + workerId, + leaderDecider, + leaseRefresher, + currentStreamConfigMap, + shardSyncTaskManagerProvider, + streamToShardSyncTaskManagerMap, + Executors.newSingleThreadScheduledExecutor(), + isMultiStreamingMode, + metricsFactory, + leasesRecoveryAuditorExecutionFrequencyMillis, + leasesRecoveryAuditorInconsistencyConfidenceThreshold, + leaderSynced); + } + + PeriodicShardSyncManager( + String workerId, + LeaderDecider leaderDecider, + LeaseRefresher leaseRefresher, + Map<StreamIdentifier, StreamConfig> currentStreamConfigMap, + Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider, + Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap, + ScheduledExecutorService shardSyncThreadPool, + boolean isMultiStreamingMode, + MetricsFactory metricsFactory, + long leasesRecoveryAuditorExecutionFrequencyMillis, + int leasesRecoveryAuditorInconsistencyConfidenceThreshold, + AtomicBoolean leaderSynced) { Validate.notBlank(workerId, "WorkerID is required to initialize PeriodicShardSyncManager."); Validate.notNull(leaderDecider, "LeaderDecider is required to initialize PeriodicShardSyncManager."); this.workerId = workerId; @@ -115,11 +150,14 @@ this.leaseRefresher = leaseRefresher; this.currentStreamConfigMap = currentStreamConfigMap; this.shardSyncTaskManagerProvider = shardSyncTaskManagerProvider; + this.streamToShardSyncTaskManagerMap = streamToShardSyncTaskManagerMap; this.shardSyncThreadPool = shardSyncThreadPool; this.isMultiStreamingMode = isMultiStreamingMode; this.metricsFactory = metricsFactory; this.leasesRecoveryAuditorExecutionFrequencyMillis = leasesRecoveryAuditorExecutionFrequencyMillis; - this.leasesRecoveryAuditorInconsistencyConfidenceThreshold = leasesRecoveryAuditorInconsistencyConfidenceThreshold; + this.leasesRecoveryAuditorInconsistencyConfidenceThreshold = + leasesRecoveryAuditorInconsistencyConfidenceThreshold; + this.leaderSynced = leaderSynced; } public synchronized TaskResult start() { @@ -131,10 +169,12 @@ log.error("Error during runShardSync.", t); } }; - shardSyncThreadPool.scheduleWithFixedDelay(periodicShardSyncer, INITIAL_DELAY, leasesRecoveryAuditorExecutionFrequencyMillis, + shardSyncThreadPool.scheduleWithFixedDelay( + periodicShardSyncer, + INITIAL_DELAY, + leasesRecoveryAuditorExecutionFrequencyMillis, TimeUnit.MILLISECONDS); isRunning = true; - } return new TaskResult(null); } @@ -167,45 +207,82 @@ } private void runShardSync() { - if (leaderDecider.isLeader(workerId)
&& leaderSynced.get()) { log.info(String.format("WorkerId %s is leader, running the periodic shard sync task", workerId)); - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, - PERIODIC_SHARD_SYNC_MANAGER); + final MetricsScope scope = + MetricsUtil.createMetricsWithOperation(metricsFactory, PERIODIC_SHARD_SYNC_MANAGER); int numStreamsWithPartialLeases = 0; int numStreamsToSync = 0; + int numSkippedShardSyncTask = 0; boolean isRunSuccess = false; final long runStartMillis = System.currentTimeMillis(); try { + // Create a copy of the streams to be considered for this run to avoid data race with Scheduler. + final Set<StreamIdentifier> streamConfigMap = new HashSet<>(currentStreamConfigMap.keySet()); + // Construct the stream to leases map to be used in the lease sync - final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = getStreamToLeasesMap( - currentStreamConfigMap.keySet()); + final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = getStreamToLeasesMap(streamConfigMap); // For each of the stream, check if shard sync needs to be done based on the leases state. - for (Map.Entry<StreamIdentifier, StreamConfig> streamConfigEntry : currentStreamConfigMap.entrySet()) { - final ShardSyncResponse shardSyncResponse = checkForShardSync(streamConfigEntry.getKey(), - streamToLeasesMap.get(streamConfigEntry.getKey())); + for (StreamIdentifier streamIdentifier : streamConfigMap) { + if (!currentStreamConfigMap.containsKey(streamIdentifier)) { + log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier); + continue; + } + final ShardSyncResponse shardSyncResponse = + checkForShardSync(streamIdentifier, streamToLeasesMap.get(streamIdentifier)); numStreamsWithPartialLeases += shardSyncResponse.isHoleDetected() ? 1 : 0; numStreamsToSync += shardSyncResponse.shouldDoShardSync ? 1 : 0; if (shardSyncResponse.shouldDoShardSync()) { - log.info("Periodic shard syncer initiating shard sync for {} due to the reason - {} ", - streamConfigEntry.getKey(), shardSyncResponse.reasonForDecision()); - final ShardSyncTaskManager shardSyncTaskManager = shardSyncTaskManagerProvider - .apply(streamConfigEntry.getValue()); + log.info( + "Periodic shard syncer initiating shard sync for {} due to the reason - {} ", + streamIdentifier, + shardSyncResponse.reasonForDecision()); + final StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier); + if (streamConfig == null) { + log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier); + continue; + } + final ShardSyncTaskManager shardSyncTaskManager; + if (streamToShardSyncTaskManagerMap.containsKey(streamConfig)) { + log.info( + "shardSyncTaskManager for stream {} already exists", streamIdentifier.streamName()); + shardSyncTaskManager = streamToShardSyncTaskManagerMap.get(streamConfig); + } else { + // If streamConfig of a stream has already been added to currentStreamConfigMap but + // Scheduler failed to create shardSyncTaskManager for it, then Scheduler will not try + // to create one later. So enable PeriodicShardSyncManager to do it for such cases + log.info( + "Failed to get shardSyncTaskManager so creating one for stream {}.", + streamIdentifier.streamName()); + shardSyncTaskManager = streamToShardSyncTaskManagerMap.computeIfAbsent( + streamConfig, s -> shardSyncTaskManagerProvider.apply(s)); + } if (!shardSyncTaskManager.submitShardSyncTask()) { log.warn( "Failed to submit shard sync task for stream {}.
This could be due to the previous pending shard sync task.", - shardSyncTaskManager.shardDetector().streamIdentifier().streamName()); + shardSyncTaskManager + .shardDetector() + .streamIdentifier() + .streamName()); + numSkippedShardSyncTask += 1; } else { - log.info("Submitted shard sync task for stream {} because of reason {}", - shardSyncTaskManager.shardDetector().streamIdentifier().streamName(), + log.info( + "Submitted shard sync task for stream {} because of reason {}", + shardSyncTaskManager + .shardDetector() + .streamIdentifier() + .streamName(), shardSyncResponse.reasonForDecision()); } } else { - log.info("Skipping shard sync for {} due to the reason - {}", streamConfigEntry.getKey(), + log.info( + "Skipping shard sync for {} due to the reason - {}", + streamIdentifier, shardSyncResponse.reasonForDecision()); } } @@ -213,8 +290,14 @@ } catch (Exception e) { log.error("Caught exception while running periodic shard syncer.", e); } finally { - scope.addData("NumStreamsWithPartialLeases", numStreamsWithPartialLeases, StandardUnit.COUNT, MetricsLevel.SUMMARY); + scope.addData( + "NumStreamsWithPartialLeases", + numStreamsWithPartialLeases, + StandardUnit.COUNT, + MetricsLevel.SUMMARY); scope.addData("NumStreamsToSync", numStreamsToSync, StandardUnit.COUNT, MetricsLevel.SUMMARY); + scope.addData( + "NumSkippedShardSyncTask", numSkippedShardSyncTask, StandardUnit.COUNT, MetricsLevel.SUMMARY); MetricsUtil.addSuccessAndLatency(scope, isRunSuccess, runStartMillis, MetricsLevel.SUMMARY); scope.end(); } @@ -241,17 +324,18 @@ } else { final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = new HashMap<>(); for (Lease lease : leases) { - StreamIdentifier streamIdentifier = StreamIdentifier - .multiStreamInstance(((MultiStreamLease) lease).streamIdentifier()); + StreamIdentifier streamIdentifier = + StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier()); if (streamIdentifiersToFilter.contains(streamIdentifier)) { - streamToLeasesMap.computeIfAbsent(streamIdentifier, s -> new ArrayList<>()).add(lease); + streamToLeasesMap + .computeIfAbsent(streamIdentifier, s -> new ArrayList<>()) + .add(lease); } } return streamToLeasesMap; } } - /** * Given a list of leases for a stream, determine if a shard sync is necessary. * @param streamIdentifier @@ -272,11 +356,13 @@ // If hole is determined with high confidence return true; return false otherwise // We are using the high confidence factor to avoid shard sync on any holes during resharding and // lease cleanups or any intermittent issues. - final HashRangeHoleTracker hashRangeHoleTracker = hashRangeHoleTrackerMap - .computeIfAbsent(streamIdentifier, s -> new HashRangeHoleTracker()); - final boolean hasHoleWithHighConfidence = hashRangeHoleTracker - .hasHighConfidenceOfHoleWith(hashRangeHoleOpt.get()); + final HashRangeHoleTracker hashRangeHoleTracker = + hashRangeHoleTrackerMap.computeIfAbsent(streamIdentifier, s -> new HashRangeHoleTracker()); + final boolean hasHoleWithHighConfidence = + hashRangeHoleTracker.hasHighConfidenceOfHoleWith(hashRangeHoleOpt.get()); + return new ShardSyncResponse( + hasHoleWithHighConfidence, + true, "Detected same hole for " + hashRangeHoleTracker.getNumConsecutiveHoles() + " times.
Shard sync will be initiated when threshold reaches " + leasesRecoveryAuditorInconsistencyConfidenceThreshold); @@ -312,7 +398,9 @@ Optional<HashRangeHole> hasHoleInLeases(StreamIdentifier streamIdentifier, List<Lease> leases) { // Filter the leases with any checkpoint other than shard end. List<Lease> activeLeases = leases.stream() - .filter(lease -> lease.checkpoint() != null && !lease.checkpoint().isShardEnd()).collect(Collectors.toList()); + .filter(lease -> + lease.checkpoint() != null && !lease.checkpoint().isShardEnd()) + .collect(Collectors.toList()); List<Lease> activeLeasesWithHashRanges = fillWithHashRangesIfRequired(streamIdentifier, activeLeases); return checkForHoleInHashKeyRanges(streamIdentifier, activeLeasesWithHashRanges); } @@ -321,78 +409,105 @@ // by learning from kinesis shards. private List<Lease> fillWithHashRangesIfRequired(StreamIdentifier streamIdentifier, List<Lease> activeLeases) { List<Lease> activeLeasesWithNoHashRanges = activeLeases.stream() - .filter(lease -> lease.hashKeyRangeForLease() == null).collect(Collectors.toList()); + .filter(lease -> lease.hashKeyRangeForLease() == null) + .collect(Collectors.toList()); Optional<Lease> minLeaseOpt = activeLeasesWithNoHashRanges.stream().min(Comparator.comparing(Lease::leaseKey)); if (minLeaseOpt.isPresent()) { // TODO : use minLease for new ListShards with startingShardId final Lease minLease = minLeaseOpt.get(); final ShardDetector shardDetector = shardSyncTaskManagerProvider - .apply(currentStreamConfigMap.get(streamIdentifier)).shardDetector(); - final Map<String, Shard> kinesisShards = shardDetector.listShards().stream() - .collect(Collectors.toMap(Shard::shardId, shard -> shard)); - return activeLeases.stream().map(lease -> { - if (lease.hashKeyRangeForLease() == null) { - final String shardId = lease instanceof MultiStreamLease ? - ((MultiStreamLease) lease).shardId() : - lease.leaseKey(); - final Shard shard = kinesisShards.get(shardId); - if(shard == null) { + .apply(currentStreamConfigMap.get(streamIdentifier)) + .shardDetector(); + final Map<String, Shard> kinesisShards = + shardDetector.listShards().stream().collect(Collectors.toMap(Shard::shardId, shard -> shard)); + return activeLeases.stream() + .map(lease -> { + if (lease.hashKeyRangeForLease() == null) { + final String shardId = lease instanceof MultiStreamLease + ? ((MultiStreamLease) lease).shardId() + : lease.leaseKey(); + final Shard shard = kinesisShards.get(shardId); + if (shard == null) { + return lease; + } + lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange())); + try { + leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE); + } catch (Exception e) { + log.warn( + "Unable to update hash range key information for lease {} of stream {}." + + "This may result in explicit lease sync.", + lease.leaseKey(), + streamIdentifier); + } + } return lease; - } - lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange())); - try { - leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE); - } catch (Exception e) { - log.warn( - "Unable to update hash range key information for lease {} of stream {}.
This may result in explicit lease sync.", - lease.leaseKey(), streamIdentifier); - } - } - return lease; - }).filter(lease -> lease.hashKeyRangeForLease() != null).collect(Collectors.toList()); + }) + .filter(lease -> lease.hashKeyRangeForLease() != null) + .collect(Collectors.toList()); } else { return activeLeases; } } @VisibleForTesting - static Optional checkForHoleInHashKeyRanges(StreamIdentifier streamIdentifier, - List leasesWithHashKeyRanges) { + static Optional checkForHoleInHashKeyRanges( + StreamIdentifier streamIdentifier, List leasesWithHashKeyRanges) { // Sort the hash ranges by starting hash key. List sortedLeasesWithHashKeyRanges = sortLeasesByHashRange(leasesWithHashKeyRanges); - if(sortedLeasesWithHashKeyRanges.isEmpty()) { + if (sortedLeasesWithHashKeyRanges.isEmpty()) { log.error("No leases with valid hashranges found for stream {}", streamIdentifier); return Optional.of(new HashRangeHole()); } // Validate for hashranges bounds. - if (!sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease().startingHashKey().equals(MIN_HASH_KEY) || !sortedLeasesWithHashKeyRanges - .get(sortedLeasesWithHashKeyRanges.size() - 1).hashKeyRangeForLease().endingHashKey().equals(MAX_HASH_KEY)) { - log.error("Incomplete hash range found for stream {} between {} and {}.", streamIdentifier, + if (!sortedLeasesWithHashKeyRanges + .get(0) + .hashKeyRangeForLease() + .startingHashKey() + .equals(MIN_HASH_KEY) + || !sortedLeasesWithHashKeyRanges + .get(sortedLeasesWithHashKeyRanges.size() - 1) + .hashKeyRangeForLease() + .endingHashKey() + .equals(MAX_HASH_KEY)) { + log.error( + "Incomplete hash range found for stream {} between {} and {}.", + streamIdentifier, sortedLeasesWithHashKeyRanges.get(0), sortedLeasesWithHashKeyRanges.get(sortedLeasesWithHashKeyRanges.size() - 1)); - return Optional.of(new HashRangeHole(sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease(), - sortedLeasesWithHashKeyRanges.get(sortedLeasesWithHashKeyRanges.size() - 1).hashKeyRangeForLease())); + return Optional.of(new HashRangeHole( + sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease(), + sortedLeasesWithHashKeyRanges + .get(sortedLeasesWithHashKeyRanges.size() - 1) + .hashKeyRangeForLease())); } // Check for any holes in the sorted hashrange intervals. if (sortedLeasesWithHashKeyRanges.size() > 1) { Lease leftMostLeaseToReportInCaseOfHole = sortedLeasesWithHashKeyRanges.get(0); HashKeyRangeForLease leftLeaseHashRange = leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(); for (int i = 1; i < sortedLeasesWithHashKeyRanges.size(); i++) { - final HashKeyRangeForLease rightLeaseHashRange = sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease(); - final BigInteger rangeDiff = rightLeaseHashRange.startingHashKey().subtract(leftLeaseHashRange.endingHashKey()); + final HashKeyRangeForLease rightLeaseHashRange = + sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease(); + final BigInteger rangeDiff = + rightLeaseHashRange.startingHashKey().subtract(leftLeaseHashRange.endingHashKey()); // Case of overlapping leases when the rangediff is 0 or negative. // signum() will be -1 for negative and 0 if value is 0. // Merge the range for further tracking. if (rangeDiff.signum() <= 0) { - leftLeaseHashRange = new HashKeyRangeForLease(leftLeaseHashRange.startingHashKey(), + leftLeaseHashRange = new HashKeyRangeForLease( + leftLeaseHashRange.startingHashKey(), leftLeaseHashRange.endingHashKey().max(rightLeaseHashRange.endingHashKey())); } else { // Case of non overlapping leases when rangediff is positive. 
signum() will be 1 for positive. // If rangeDiff is 1, then it is a case of continuous hashrange. If not, it is a hole. if (!rangeDiff.equals(BigInteger.ONE)) { - log.error("Incomplete hash range found for {} between {} and {}.", streamIdentifier, - leftMostLeaseToReportInCaseOfHole, sortedLeasesWithHashKeyRanges.get(i)); - return Optional.of(new HashRangeHole(leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(), + log.error( + "Incomplete hash range found for {} between {} and {}.", + streamIdentifier, + leftMostLeaseToReportInCaseOfHole, + sortedLeasesWithHashKeyRanges.get(i)); + return Optional.of(new HashRangeHole( + leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(), sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease())); } leftMostLeaseToReportInCaseOfHole = sortedLeasesWithHashKeyRanges.get(i); @@ -405,8 +520,9 @@ class PeriodicShardSyncManager { @VisibleForTesting static List sortLeasesByHashRange(List leasesWithHashKeyRanges) { - if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1) + if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1) { return leasesWithHashKeyRanges; + } Collections.sort(leasesWithHashKeyRanges, new HashKeyRangeComparator()); return leasesWithHashKeyRanges; } @@ -417,7 +533,9 @@ class PeriodicShardSyncManager { hashRangeAtStartOfPossibleHole = hashRangeAtEndOfPossibleHole = null; } - HashRangeHole(HashKeyRangeForLease hashRangeAtStartOfPossibleHole, HashKeyRangeForLease hashRangeAtEndOfPossibleHole) { + HashRangeHole( + HashKeyRangeForLease hashRangeAtStartOfPossibleHole, + HashKeyRangeForLease hashRangeAtEndOfPossibleHole) { this.hashRangeAtStartOfPossibleHole = hashRangeAtStartOfPossibleHole; this.hashRangeAtEndOfPossibleHole = hashRangeAtEndOfPossibleHole; } @@ -428,6 +546,7 @@ class PeriodicShardSyncManager { private class HashRangeHoleTracker { private HashRangeHole hashRangeHole; + @Getter private Integer numConsecutiveHoles; @@ -456,8 +575,12 @@ class PeriodicShardSyncManager { Validate.notNull(lease.hashKeyRangeForLease()); Validate.notNull(otherLease.hashKeyRangeForLease()); return ComparisonChain.start() - .compare(lease.hashKeyRangeForLease().startingHashKey(), otherLease.hashKeyRangeForLease().startingHashKey()) - .compare(lease.hashKeyRangeForLease().endingHashKey(), otherLease.hashKeyRangeForLease().endingHashKey()) + .compare( + lease.hashKeyRangeForLease().startingHashKey(), + otherLease.hashKeyRangeForLease().startingHashKey()) + .compare( + lease.hashKeyRangeForLease().endingHashKey(), + otherLease.hashKeyRangeForLease().endingHashKey()) .result(); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java index 7dc8dfaf..df8445e1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java @@ -25,8 +25,8 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi; @Slf4j @KinesisClientInternalApi class RejectedTaskEvent implements DiagnosticEvent { - private static final String MESSAGE = "Review your thread configuration to prevent task rejections. " + - "Task rejections will slow down your application and some shards may stop processing. "; + private static final String MESSAGE = "Review your thread configuration to prevent task rejections. 
" + + "Task rejections will slow down your application and some shards may stop processing. "; private ExecutorStateEvent executorStateEvent; private Throwable throwable; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java index 743be28a..3d2b6c41 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java @@ -15,10 +15,6 @@ package software.amazon.kinesis.coordinator; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Stopwatch; - -import io.reactivex.rxjava3.plugins.RxJavaPlugins; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; @@ -41,15 +37,23 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Stopwatch; +import io.reactivex.rxjava3.plugins.RxJavaPlugins; import lombok.AccessLevel; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.NonNull; +import lombok.RequiredArgsConstructor; import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.utils.Validate; import software.amazon.kinesis.checkpoint.CheckpointConfig; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; @@ -89,14 +93,15 @@ import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy; import software.amazon.kinesis.processor.ProcessorConfig; import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.ShutdownNotificationAware; import software.amazon.kinesis.processor.StreamTracker; import software.amazon.kinesis.retrieval.AggregatorUtil; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RetrievalConfig; import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder; +import static software.amazon.kinesis.common.ArnUtil.constructStreamArn; import static software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.StreamsLeasesDeletionType; +import static software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.StreamsLeasesDeletionType.FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION; /** * @@ -116,6 +121,7 @@ public class Scheduler implements Runnable { private static final String ACTIVE_STREAMS_COUNT = "ActiveStreams.Count"; private static final String PENDING_STREAMS_DELETION_COUNT = "StreamsPendingDeletion.Count"; private static final String DELETED_STREAMS_COUNT = "DeletedStreams.Count"; + private static final String NON_EXISTING_STREAM_DELETE_COUNT = "NonExistingStreamDelete.Count"; private final SchedulerLog slog = new SchedulerLog(); @@ -139,7 +145,7 @@ public class Scheduler implements Runnable { private final DiagnosticEventHandler diagnosticEventHandler; private final LeaseCoordinator leaseCoordinator; private final Function shardSyncTaskManagerProvider; - private 
final Map streamToShardSyncTaskManagerMap = new HashMap<>(); + private final Map streamToShardSyncTaskManagerMap = new ConcurrentHashMap<>(); private final PeriodicShardSyncManager leaderElectedPeriodicShardSyncManager; private final ShardPrioritization shardPrioritization; private final boolean cleanupLeasesUponShardCompletion; @@ -150,7 +156,7 @@ public class Scheduler implements Runnable { private final long failoverTimeMillis; private final long taskBackoffTimeMillis; private final boolean isMultiStreamMode; - private final Map currentStreamConfigMap = new ConcurrentHashMap<>(); + private final Map currentStreamConfigMap = new StreamConfigMap(); private final StreamTracker streamTracker; private final FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy; private final long listShardsBackoffTimeMillis; @@ -166,6 +172,8 @@ public class Scheduler implements Runnable { private final LeaseCleanupManager leaseCleanupManager; private final SchemaRegistryDecoder schemaRegistryDecoder; + private final DeletedStreamListProvider deletedStreamListProvider; + // Holds consumers for shards the worker is currently tracking. Key is shard // info, value is ShardConsumer. private final ConcurrentMap shardInfoShardConsumerMap = new ConcurrentHashMap<>(); @@ -177,24 +185,44 @@ public class Scheduler implements Runnable { private final Object lock = new Object(); private final Stopwatch streamSyncWatch = Stopwatch.createUnstarted(); + private boolean leasesSyncedOnAppInit = false; + @Getter(AccessLevel.NONE) + private final AtomicBoolean leaderSynced = new AtomicBoolean(false); + /** * Used to ensure that only one requestedShutdown is in progress at a time. */ private CompletableFuture gracefulShutdownFuture; + + /** + * CountDownLatch used by the GracefulShutdownCoordinator. Reaching zero means that + * the scheduler's finalShutdown() call has completed. + */ + @Getter(AccessLevel.NONE) + private final CountDownLatch finalShutdownLatch = new CountDownLatch(1); + @VisibleForTesting protected boolean gracefuleShutdownStarted = false; - public Scheduler(@NonNull final CheckpointConfig checkpointConfig, - @NonNull final CoordinatorConfig coordinatorConfig, - @NonNull final LeaseManagementConfig leaseManagementConfig, - @NonNull final LifecycleConfig lifecycleConfig, - @NonNull final MetricsConfig metricsConfig, - @NonNull final ProcessorConfig processorConfig, - @NonNull final RetrievalConfig retrievalConfig) { - this(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, metricsConfig, - processorConfig, retrievalConfig, new DiagnosticEventFactory()); + public Scheduler( + @NonNull final CheckpointConfig checkpointConfig, + @NonNull final CoordinatorConfig coordinatorConfig, + @NonNull final LeaseManagementConfig leaseManagementConfig, + @NonNull final LifecycleConfig lifecycleConfig, + @NonNull final MetricsConfig metricsConfig, + @NonNull final ProcessorConfig processorConfig, + @NonNull final RetrievalConfig retrievalConfig) { + this( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig, + new DiagnosticEventFactory()); } /** @@ -202,14 +230,15 @@ public class Scheduler implements Runnable { * is desired for testing. This constructor is only used for testing to provide a mock DiagnosticEventFactory. 
*/ @VisibleForTesting - protected Scheduler(@NonNull final CheckpointConfig checkpointConfig, - @NonNull final CoordinatorConfig coordinatorConfig, - @NonNull final LeaseManagementConfig leaseManagementConfig, - @NonNull final LifecycleConfig lifecycleConfig, - @NonNull final MetricsConfig metricsConfig, - @NonNull final ProcessorConfig processorConfig, - @NonNull final RetrievalConfig retrievalConfig, - @NonNull final DiagnosticEventFactory diagnosticEventFactory) { + protected Scheduler( + @NonNull final CheckpointConfig checkpointConfig, + @NonNull final CoordinatorConfig coordinatorConfig, + @NonNull final LeaseManagementConfig leaseManagementConfig, + @NonNull final LifecycleConfig lifecycleConfig, + @NonNull final MetricsConfig metricsConfig, + @NonNull final ProcessorConfig processorConfig, + @NonNull final RetrievalConfig retrievalConfig, + @NonNull final DiagnosticEventFactory diagnosticEventFactory) { this.checkpointConfig = checkpointConfig; this.coordinatorConfig = coordinatorConfig; this.leaseManagementConfig = leaseManagementConfig; @@ -222,15 +251,14 @@ public class Scheduler implements Runnable { this.streamTracker = retrievalConfig.streamTracker(); this.isMultiStreamMode = streamTracker.isMultiStream(); this.formerStreamsLeasesDeletionStrategy = streamTracker.formerStreamsLeasesDeletionStrategy(); - streamTracker.streamConfigList().forEach( - sc -> currentStreamConfigMap.put(sc.streamIdentifier(), sc)); + streamTracker.streamConfigList().forEach(sc -> currentStreamConfigMap.put(sc.streamIdentifier(), sc)); + log.info("Initial state: {}", currentStreamConfigMap.values()); this.maxInitializationAttempts = this.coordinatorConfig.maxInitializationAttempts(); this.metricsFactory = this.metricsConfig.metricsFactory(); // Determine leaseSerializer based on availability of MultiStreamTracker. - final LeaseSerializer leaseSerializer = isMultiStreamMode ? - new DynamoDBMultiStreamLeaseSerializer() : - new DynamoDBLeaseSerializer(); + final LeaseSerializer leaseSerializer = + isMultiStreamMode ? 
new DynamoDBMultiStreamLeaseSerializer() : new DynamoDBLeaseSerializer(); this.leaseCoordinator = this.leaseManagementConfig .leaseManagementFactory(leaseSerializer, isMultiStreamMode) .createLeaseCoordinator(this.metricsFactory); @@ -239,8 +267,9 @@ public class Scheduler implements Runnable { // // TODO: Figure out what to do with lease manage <=> checkpoint relationship // - this.checkpoint = this.checkpointConfig.checkpointFactory().createCheckpointer(this.leaseCoordinator, - this.leaseRefresher); + this.checkpoint = this.checkpointConfig + .checkpointFactory() + .createCheckpointer(this.leaseCoordinator, this.leaseRefresher); // // TODO: Move this configuration to lifecycle @@ -250,9 +279,10 @@ public class Scheduler implements Runnable { this.executorService = this.coordinatorConfig.coordinatorFactory().createExecutorService(); this.diagnosticEventFactory = diagnosticEventFactory; this.diagnosticEventHandler = new DiagnosticEventLogger(); + this.deletedStreamListProvider = new DeletedStreamListProvider(); this.shardSyncTaskManagerProvider = streamConfig -> this.leaseManagementConfig .leaseManagementFactory(leaseSerializer, isMultiStreamMode) - .createShardSyncTaskManager(this.metricsFactory, streamConfig); + .createShardSyncTaskManager(this.metricsFactory, streamConfig, this.deletedStreamListProvider); this.shardPrioritization = this.coordinatorConfig.shardPrioritization(); this.cleanupLeasesUponShardCompletion = this.leaseManagementConfig.cleanupLeasesUponShardCompletion(); this.skipShardSyncAtWorkerInitializationIfLeasesExist = @@ -260,39 +290,46 @@ public class Scheduler implements Runnable { if (coordinatorConfig.gracefulShutdownCoordinator() != null) { this.gracefulShutdownCoordinator = coordinatorConfig.gracefulShutdownCoordinator(); } else { - this.gracefulShutdownCoordinator = this.coordinatorConfig.coordinatorFactory() - .createGracefulShutdownCoordinator(); + this.gracefulShutdownCoordinator = + this.coordinatorConfig.coordinatorFactory().createGracefulShutdownCoordinator(); } if (coordinatorConfig.workerStateChangeListener() != null) { this.workerStateChangeListener = coordinatorConfig.workerStateChangeListener(); } else { - this.workerStateChangeListener = this.coordinatorConfig.coordinatorFactory() - .createWorkerStateChangeListener(); + this.workerStateChangeListener = + this.coordinatorConfig.coordinatorFactory().createWorkerStateChangeListener(); } - this.leaderDecider = new DeterministicShuffleShardSyncLeaderDecider(leaseRefresher, - Executors.newSingleThreadScheduledExecutor(), - PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT); + this.leaderDecider = new DeterministicShuffleShardSyncLeaderDecider( + leaseRefresher, Executors.newSingleThreadScheduledExecutor(), PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT); this.failoverTimeMillis = this.leaseManagementConfig.failoverTimeMillis(); this.taskBackoffTimeMillis = this.lifecycleConfig.taskBackoffTimeMillis(); -// this.retryGetRecordsInSeconds = this.retrievalConfig.retryGetRecordsInSeconds(); -// this.maxGetRecordsThreadPool = this.retrievalConfig.maxGetRecordsThreadPool(); this.listShardsBackoffTimeMillis = this.retrievalConfig.listShardsBackoffTimeInMillis(); this.maxListShardsRetryAttempts = this.retrievalConfig.maxListShardsRetryAttempts(); - this.shardDetectorProvider = streamConfig -> createOrGetShardSyncTaskManager(streamConfig).shardDetector(); + this.shardDetectorProvider = + streamConfig -> createOrGetShardSyncTaskManager(streamConfig).shardDetector(); this.ignoreUnexpetedChildShards = 
this.leaseManagementConfig.ignoreUnexpectedChildShards(); this.aggregatorUtil = this.lifecycleConfig.aggregatorUtil(); - this.hierarchicalShardSyncerProvider = streamConfig -> createOrGetShardSyncTaskManager(streamConfig).hierarchicalShardSyncer(); - this.schedulerInitializationBackoffTimeMillis = this.coordinatorConfig.schedulerInitializationBackoffTimeMillis(); + this.hierarchicalShardSyncerProvider = + streamConfig -> createOrGetShardSyncTaskManager(streamConfig).hierarchicalShardSyncer(); + this.schedulerInitializationBackoffTimeMillis = + this.coordinatorConfig.schedulerInitializationBackoffTimeMillis(); this.leaderElectedPeriodicShardSyncManager = new PeriodicShardSyncManager( - leaseManagementConfig.workerIdentifier(), leaderDecider, leaseRefresher, currentStreamConfigMap, - shardSyncTaskManagerProvider, isMultiStreamMode, metricsFactory, + leaseManagementConfig.workerIdentifier(), + leaderDecider, + leaseRefresher, + currentStreamConfigMap, + shardSyncTaskManagerProvider, + streamToShardSyncTaskManagerMap, + isMultiStreamMode, + metricsFactory, leaseManagementConfig.leasesRecoveryAuditorExecutionFrequencyMillis(), - leaseManagementConfig.leasesRecoveryAuditorInconsistencyConfidenceThreshold()); - this.leaseCleanupManager = this.leaseManagementConfig.leaseManagementFactory(leaseSerializer, isMultiStreamMode) + leaseManagementConfig.leasesRecoveryAuditorInconsistencyConfidenceThreshold(), + leaderSynced); + this.leaseCleanupManager = this.leaseManagementConfig + .leaseManagementFactory(leaseSerializer, isMultiStreamMode) .createLeaseCleanupManager(metricsFactory); - this.schemaRegistryDecoder = - this.retrievalConfig.glueSchemaRegistryDeserializer() == null ? - null + this.schemaRegistryDecoder = this.retrievalConfig.glueSchemaRegistryDeserializer() == null + ? 
null : new SchemaRegistryDecoder(this.retrievalConfig.glueSchemaRegistryDeserializer()); } @@ -336,9 +373,10 @@ public class Scheduler implements Runnable { if (!skipShardSyncAtWorkerInitializationIfLeasesExist || leaseRefresher.isLeaseTableEmpty()) { if (shouldInitiateLeaseSync()) { - log.info("Worker {} is initiating the lease sync.", leaseManagementConfig.workerIdentifier()); + log.info( + "Worker {} is initiating the lease sync.", + leaseManagementConfig.workerIdentifier()); leaderElectedPeriodicShardSyncManager.syncShardsOnce(); - } } else { log.info("Skipping shard sync per configuration setting (and lease table is not empty)"); @@ -381,13 +419,15 @@ public class Scheduler implements Runnable { } @VisibleForTesting - boolean shouldInitiateLeaseSync() throws InterruptedException, - DependencyException, ProvisionedThroughputException, InvalidStateException { - long waitTime = ThreadLocalRandom.current().nextLong(MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS, MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS); + boolean shouldInitiateLeaseSync() + throws InterruptedException, DependencyException, ProvisionedThroughputException, InvalidStateException { + long waitTime = ThreadLocalRandom.current() + .nextLong(MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS, MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS); long waitUntil = System.currentTimeMillis() + waitTime; boolean shouldInitiateLeaseSync = true; - while (System.currentTimeMillis() < waitUntil && (shouldInitiateLeaseSync = leaseRefresher.isLeaseTableEmpty())) { + while (System.currentTimeMillis() < waitUntil + && (shouldInitiateLeaseSync = leaseRefresher.isLeaseTableEmpty())) { // check every 3 seconds if lease table is still empty, // to minimize contention between all workers bootstrapping at the same time log.info("Lease table is still empty. 
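The shouldInitiateLeaseSync reformatting above is a good place to spell out what the loop does: each worker waits a randomized amount of time while polling the lease table, so a fleet bootstrapping simultaneously does not stampede the initial shard sync; only a worker that still sees an empty table at the end initiates the sync. A self-contained sketch with the dependency abstracted behind a BooleanSupplier (names are illustrative, not the actual Scheduler code):

import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BooleanSupplier;

class LeaseTableCheckSketch {
    // Poll until either the randomized wait window expires or another worker
    // has populated the lease table (in which case no sync is needed here).
    static boolean shouldInitiateLeaseSync(
            BooleanSupplier leaseTableIsEmpty, long minWaitMillis, long maxWaitMillis, long pollMillis)
            throws InterruptedException {
        final long waitUntil = System.currentTimeMillis()
                + ThreadLocalRandom.current().nextLong(minWaitMillis, maxWaitMillis);
        boolean shouldSync = true;
        while (System.currentTimeMillis() < waitUntil && (shouldSync = leaseTableIsEmpty.getAsBoolean())) {
            Thread.sleep(pollMillis);
        }
        return shouldSync;
    }
}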
Checking again in {} ms", LEASE_TABLE_CHECK_FREQUENCY_MILLIS); @@ -401,8 +441,8 @@ public class Scheduler implements Runnable { try { Set assignedShards = new HashSet<>(); for (ShardInfo shardInfo : getShardInfoForAssignments()) { - ShardConsumer shardConsumer = createOrGetShardConsumer(shardInfo, - processorConfig.shardRecordProcessorFactory(), leaseCleanupManager); + ShardConsumer shardConsumer = createOrGetShardConsumer( + shardInfo, processorConfig.shardRecordProcessorFactory(), leaseCleanupManager); shardConsumer.executeLifecycle(); assignedShards.add(shardInfo); @@ -414,14 +454,19 @@ public class Scheduler implements Runnable { // check for new streams and sync with the scheduler state if (isLeader()) { checkAndSyncStreamShardsAndLeases(); + leaderSynced.set(true); + } else { + leaderSynced.set(false); } logExecutorState(); slog.info("Sleeping ..."); Thread.sleep(shardConsumerDispatchPollIntervalMillis); } catch (Exception e) { - log.error("Worker.run caught exception, sleeping for {} milli seconds!", - String.valueOf(shardConsumerDispatchPollIntervalMillis), e); + log.error( + "Worker.run caught exception, sleeping for {} milli seconds!", + shardConsumerDispatchPollIntervalMillis, + e); try { Thread.sleep(shardConsumerDispatchPollIntervalMillis); } catch (InterruptedException ex) { @@ -446,20 +491,40 @@ public class Scheduler implements Runnable { final Set streamsSynced = new HashSet<>(); if (shouldSyncStreamsNow()) { - final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, MULTI_STREAM_TRACKER); + final MetricsScope metricsScope = + MetricsUtil.createMetricsWithOperation(metricsFactory, MULTI_STREAM_TRACKER); try { - final Map newStreamConfigMap = streamTracker.streamConfigList() - .stream().collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity())); - - List leases; - + final Map newStreamConfigMap = streamTracker.streamConfigList().stream() + .collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity())); // This is done to ensure that we clean up the stale streams lingering in the lease table. - if (!leasesSyncedOnAppInit && isMultiStreamMode) { - leases = fetchMultiStreamLeases(); - syncStreamsFromLeaseTableOnAppInit(leases); - leasesSyncedOnAppInit = true; + // Only sync from lease table again if the currentStreamConfigMap and newStreamConfigMap contain + // different set of streams and Leader has not synced the leases yet + // or this is the first app bootstrap. + if ((!leaderSynced.get() && !newStreamConfigMap.keySet().equals(currentStreamConfigMap.keySet())) + || !leasesSyncedOnAppInit) { + log.info("Syncing leases for leader to catch up"); + final List leaseTableLeases = fetchMultiStreamLeases(); + syncStreamsFromLeaseTableOnAppInit(leaseTableLeases); + final Set streamsFromLeaseTable = leaseTableLeases.stream() + .map(lease -> StreamIdentifier.multiStreamInstance(lease.streamIdentifier())) + .collect(Collectors.toSet()); + // Remove stream from currentStreamConfigMap if this stream in not in the lease table and + // newStreamConfigMap. + // This means that the leases have already been deleted by the last leader. 
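The leaderSynced flag set and cleared above feeds the re-sync decision in checkAndSyncStreamShardsAndLeases, shown in the next hunk: a newly elected leader re-reads the lease table when its cached stream set disagrees with the tracker, and every worker syncs once on first bootstrap. Reduced to a predicate over plain string identifiers, the decision looks roughly like this (a sketch, not the actual class):

import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the re-sync predicate from checkAndSyncStreamShardsAndLeases().
class LeaderSyncDecisionSketch {
    private final AtomicBoolean leaderSynced = new AtomicBoolean(false);
    private boolean leasesSyncedOnAppInit = false;

    boolean shouldSyncFromLeaseTable(Set<String> trackedStreams, Set<String> cachedStreams) {
        // Re-sync when this worker became leader without an up-to-date view,
        // or unconditionally on the first pass after application bootstrap.
        return (!leaderSynced.get() && !trackedStreams.equals(cachedStreams))
                || !leasesSyncedOnAppInit;
    }
}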
+ currentStreamConfigMap.keySet().stream() + .filter(streamIdentifier -> !newStreamConfigMap.containsKey(streamIdentifier) + && !streamsFromLeaseTable.contains(streamIdentifier)) + .forEach(stream -> { + log.info( + "Removing stream {} from currentStreamConfigMap due to not being active", + stream); + currentStreamConfigMap.remove(stream); + staleStreamDeletionMap.remove(stream); + streamsSynced.add(stream); + }); } + leasesSyncedOnAppInit = true; // For new streams discovered, do a shard sync and update the currentStreamConfigMap for (StreamIdentifier streamIdentifier : newStreamConfigMap.keySet()) { @@ -471,9 +536,7 @@ public class Scheduler implements Runnable { currentStreamConfigMap.put(streamIdentifier, streamConfig); streamsSynced.add(streamIdentifier); } else { - if (log.isDebugEnabled()) { - log.debug(streamIdentifier + " is already being processed - skipping shard sync."); - } + log.debug("{} is already being processed - skipping shard sync.", streamIdentifier); } } @@ -482,19 +545,21 @@ public class Scheduler implements Runnable { staleStreamDeletionMap.putIfAbsent(streamIdentifier, Instant.now()); } }; - - if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == StreamsLeasesDeletionType.FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION) { + if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() + == FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION) { // Now, we are identifying the stale/old streams and enqueuing it for deferred deletion. // It is assumed that all the workers will always have the latest and consistent snapshot of streams // from the multiStreamTracker. // - // The following streams transition state among two workers are NOT considered safe, where Worker 2, on + // The following streams transition state among two workers are NOT considered safe, where Worker 2, + // on // initialization learn about D from lease table and delete the leases for D, as it is not available // in its latest MultiStreamTracker. // Worker 1 : A,B,C -> A,B,C,D (latest) // Worker 2 : BOOTS_UP -> A,B,C (stale) // - // The following streams transition state among two workers are NOT considered safe, where Worker 2 might + // The following streams transition state among two workers are NOT considered safe, where Worker 2 + // might // end up deleting the leases for A and D and lose progress made so far. // Worker 1 : A,B,C -> A,B,C,D (latest) // Worker 2 : A,B,C -> B,C (stale/partial) @@ -503,26 +568,31 @@ public class Scheduler implements Runnable { // before attempting to delete it, we will be deferring the leases deletion based on the // defer time period. currentStreamConfigMap.keySet().forEach(enqueueStreamLeaseDeletionOperation); - } else if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION) { - Optional.ofNullable(formerStreamsLeasesDeletionStrategy.streamIdentifiersForLeaseCleanup()).ifPresent( - streamIdentifiers -> streamIdentifiers.forEach(enqueueStreamLeaseDeletionOperation)); + } else if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() + == StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION) { + Optional.ofNullable(formerStreamsLeasesDeletionStrategy.streamIdentifiersForLeaseCleanup()) + .ifPresent(streamIdentifiers -> + streamIdentifiers.forEach(enqueueStreamLeaseDeletionOperation)); } else { // Remove the old/stale streams identified through the new and existing streams list, without // cleaning up their leases. Disabling deprecated shard sync + lease cleanup through a flag. 
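The staleStreamDeletionMap bookkeeping in this hunk is the heart of deferred deletion: putIfAbsent pins the time a stream first went stale, and a later pass (the partitioningBy block below) deletes only streams that are both absent from the tracker and past the configured wait period. A simplified reconstruction over string identifiers, hedged as a sketch rather than the real Scheduler fields:

import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

class StaleStreamBookkeepingSketch {
    // putIfAbsent keeps the original enqueue time, so repeated passes over a
    // stale stream never reset its deletion clock.
    private final Map<String, Instant> staleStreamDeletionMap = new ConcurrentHashMap<>();

    void enqueueStreamLeaseDeletion(String streamIdentifier) {
        staleStreamDeletionMap.putIfAbsent(streamIdentifier, Instant.now());
    }

    // Partition enqueued streams by whether the tracker still lists them, then
    // keep only the absent ones whose deferment window has fully elapsed.
    Set<String> streamsEligibleForDeletion(Set<String> trackedStreams, Duration waitPeriod) {
        final Map<Boolean, Set<String>> byStillTracked = staleStreamDeletionMap.keySet().stream()
                .collect(Collectors.partitioningBy(trackedStreams::contains, Collectors.toSet()));
        return byStillTracked.get(false).stream()
                .filter(id -> Duration.between(staleStreamDeletionMap.get(id), Instant.now())
                        .compareTo(waitPeriod) >= 0)
                .collect(Collectors.toSet());
    }
}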
- Iterator currentSetOfStreamsIter = currentStreamConfigMap.keySet().iterator(); + Iterator currentSetOfStreamsIter = + currentStreamConfigMap.keySet().iterator(); while (currentSetOfStreamsIter.hasNext()) { StreamIdentifier streamIdentifier = currentSetOfStreamsIter.next(); if (!newStreamConfigMap.containsKey(streamIdentifier)) { if (SHOULD_DO_LEASE_SYNC_FOR_OLD_STREAMS) { log.info( - "Found old/deleted stream : {}. Triggering shard sync. Removing from tracked active streams.", streamIdentifier); - ShardSyncTaskManager shardSyncTaskManager = createOrGetShardSyncTaskManager( - currentStreamConfigMap.get(streamIdentifier)); + "Found old/deleted stream : {}. Triggering shard sync. Removing from tracked active streams.", + streamIdentifier); + ShardSyncTaskManager shardSyncTaskManager = + createOrGetShardSyncTaskManager(currentStreamConfigMap.get(streamIdentifier)); shardSyncTaskManager.submitShardSyncTask(); } else { log.info( "Found old/deleted stream : {}. Removing from tracked active streams, but not cleaning up leases," - + " as part of this workflow", streamIdentifier); + + " as part of this workflow", + streamIdentifier); } currentSetOfStreamsIter.remove(); streamsSynced.add(streamIdentifier); @@ -535,10 +605,29 @@ public class Scheduler implements Runnable { // Now let's scan the streamIdentifiersForLeaseCleanup eligible for deferred deletion and delete them. // StreamIdentifiers are eligible for deletion only when the deferment period has elapsed and // the streamIdentifiersForLeaseCleanup are not present in the latest snapshot. - final Map> staleStreamIdDeletionDecisionMap = staleStreamDeletionMap.keySet().stream().collect(Collectors - .partitioningBy(streamIdentifier -> newStreamConfigMap.containsKey(streamIdentifier), Collectors.toSet())); - final Set staleStreamIdsToBeDeleted = staleStreamIdDeletionDecisionMap.get(false).stream().filter(streamIdentifier -> - Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()).toMillis() >= waitPeriodToDeleteOldStreams.toMillis()).collect(Collectors.toSet()); + final Map> staleStreamIdDeletionDecisionMap = + staleStreamDeletionMap.keySet().stream() + .collect( + Collectors.partitioningBy(newStreamConfigMap::containsKey, Collectors.toSet())); + final Set staleStreamIdsToBeDeleted = + staleStreamIdDeletionDecisionMap.get(false).stream() + .filter(streamIdentifier -> + Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()) + .toMillis() + >= waitPeriodToDeleteOldStreams.toMillis()) + .collect(Collectors.toSet()); + // These are the streams which are deleted in Kinesis and we encounter resource not found during + // shardSyncTask. This is applicable in MultiStreamMode only, in case of SingleStreamMode, store will + // not have any data. + // Filter streams based on newStreamConfigMap so that we don't override input to KCL in any case. 
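The deletedStreamListProvider drained just below is only constrained by this diff to expose purgeAllDeletedStream(); one plausible minimal shape is a thread-safe drain-on-read set, sketched here (an assumption about the class, not the actual DeletedStreamListProvider source):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class DeletedStreamCollectorSketch {
    private final Set<String> deletedStreams = ConcurrentHashMap.newKeySet();

    // Shard-sync tasks would call this when Kinesis answers ResourceNotFound.
    void add(String streamIdentifier) {
        deletedStreams.add(streamIdentifier);
    }

    // Drain-on-read: return everything collected so far and forget it, so a
    // deleted stream is handed to lease cleanup at most once per observation.
    Set<String> purgeAllDeletedStream() {
        final Set<String> snapshot = new HashSet<>(deletedStreams);
        deletedStreams.removeAll(snapshot);
        return snapshot;
    }
}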
+ final Set deletedStreamSet = + this.deletedStreamListProvider.purgeAllDeletedStream().stream() + .filter(streamIdentifier -> !newStreamConfigMap.containsKey(streamIdentifier)) + .collect(Collectors.toSet()); + if (deletedStreamSet.size() > 0) { + log.info("Stale streams to delete: {}", deletedStreamSet); + staleStreamIdsToBeDeleted.addAll(deletedStreamSet); + } final Set deletedStreamsLeases = deleteMultiStreamLeases(staleStreamIdsToBeDeleted); streamsSynced.addAll(deletedStreamsLeases); @@ -549,16 +638,24 @@ public class Scheduler implements Runnable { if (!staleStreamDeletionMap.isEmpty()) { log.warn( "Streams enqueued for deletion for lease table cleanup along with their scheduled time for deletion: {} ", - staleStreamDeletionMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, - entry -> entry.getValue().plus(waitPeriodToDeleteOldStreams)))); + staleStreamDeletionMap.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue() + .plus(waitPeriodToDeleteOldStreams)))); } streamSyncWatch.reset().start(); - MetricsUtil.addCount(metricsScope, ACTIVE_STREAMS_COUNT, newStreamConfigMap.size(), MetricsLevel.SUMMARY); - MetricsUtil.addCount(metricsScope, PENDING_STREAMS_DELETION_COUNT, staleStreamDeletionMap.size(), + MetricsUtil.addCount( + metricsScope, ACTIVE_STREAMS_COUNT, newStreamConfigMap.size(), MetricsLevel.SUMMARY); + MetricsUtil.addCount( + metricsScope, + PENDING_STREAMS_DELETION_COUNT, + staleStreamDeletionMap.size(), MetricsLevel.SUMMARY); - MetricsUtil.addCount(metricsScope, DELETED_STREAMS_COUNT, deletedStreamsLeases.size(), MetricsLevel.SUMMARY); + MetricsUtil.addCount( + metricsScope, NON_EXISTING_STREAM_DELETE_COUNT, deletedStreamSet.size(), MetricsLevel.SUMMARY); + MetricsUtil.addCount( + metricsScope, DELETED_STREAMS_COUNT, deletedStreamsLeases.size(), MetricsLevel.SUMMARY); } finally { MetricsUtil.endScope(metricsScope); } @@ -566,29 +663,31 @@ public class Scheduler implements Runnable { return streamsSynced; } - @VisibleForTesting boolean shouldSyncStreamsNow() { - return isMultiStreamMode && - (streamSyncWatch.elapsed(TimeUnit.MILLISECONDS) > NEW_STREAM_CHECK_INTERVAL_MILLIS); + @VisibleForTesting + boolean shouldSyncStreamsNow() { + return isMultiStreamMode && (streamSyncWatch.elapsed(TimeUnit.MILLISECONDS) > NEW_STREAM_CHECK_INTERVAL_MILLIS); } - @VisibleForTesting void syncStreamsFromLeaseTableOnAppInit(List leases) { - final Set streamIdentifiers = leases.stream() + @VisibleForTesting + void syncStreamsFromLeaseTableOnAppInit(List leases) { + leases.stream() .map(lease -> StreamIdentifier.multiStreamInstance(lease.streamIdentifier())) - .collect(Collectors.toSet()); - for (StreamIdentifier streamIdentifier : streamIdentifiers) { - if (!currentStreamConfigMap.containsKey(streamIdentifier)) { - currentStreamConfigMap.put(streamIdentifier, streamTracker.createStreamConfig(streamIdentifier)); - } - } + .filter(streamIdentifier -> !currentStreamConfigMap.containsKey(streamIdentifier)) + .forEach(streamIdentifier -> { + final StreamConfig streamConfig = streamTracker.createStreamConfig(streamIdentifier); + currentStreamConfigMap.put(streamIdentifier, streamConfig); + log.info("Cached {}", streamConfig); + }); } private List fetchMultiStreamLeases() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - return (List) ((List) leaseCoordinator.leaseRefresher().listLeases()); + return (List) + ((List) leaseCoordinator.leaseRefresher().listLeases()); } private void 
removeStreamsFromStaleStreamsList(Set streamIdentifiers) { - for(StreamIdentifier streamIdentifier : streamIdentifiers) { + for (StreamIdentifier streamIdentifier : streamIdentifiers) { staleStreamDeletionMap.remove(streamIdentifier); } } @@ -598,16 +697,22 @@ public class Scheduler implements Runnable { if (streamIdentifiers.isEmpty()) { return Collections.emptySet(); } - + log.info("Deleting streams: {}", streamIdentifiers); final Set streamsSynced = new HashSet<>(); final List leases = fetchMultiStreamLeases(); - final Map> streamIdToShardsMap = leases.stream().collect( - Collectors.groupingBy(MultiStreamLease::streamIdentifier, Collectors.toCollection(ArrayList::new))); + final Map> streamIdToShardsMap = leases.stream() + .collect(Collectors.groupingBy( + MultiStreamLease::streamIdentifier, Collectors.toCollection(ArrayList::new))); for (StreamIdentifier streamIdentifier : streamIdentifiers) { + log.warn("Found old/deleted stream: {}. Directly deleting leases of this stream.", streamIdentifier); + // Removing streamIdentifier from this map so PSSM doesn't think there is a hole in the stream while + // scheduler attempts to delete the stream if the stream is taking longer to delete. If deletion fails + // it will be retried again since stream will still show up in the staleStreamDeletionMap. + // It is fine for PSSM to detect holes and it should not do shardsync because it takes few iterations + // to breach the hole confidence interval threshold. + currentStreamConfigMap.remove(streamIdentifier); // Deleting leases will cause the workers to shutdown the record processors for these shards. if (deleteMultiStreamLeases(streamIdToShardsMap.get(streamIdentifier.serialize()))) { - log.warn("Found old/deleted stream: {}. Directly deleting leases of this stream.", streamIdentifier); - currentStreamConfigMap.remove(streamIdentifier); staleStreamDeletionMap.remove(streamIdentifier); streamsSynced.add(streamIdentifier); } @@ -627,7 +732,8 @@ public class Scheduler implements Runnable { } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { log.error( "Unable to delete stale stream lease {}. Skipping further deletions for this stream. Will retry later.", - lease.leaseKey(), e); + lease.leaseKey(), + e); return false; } } @@ -661,8 +767,8 @@ public class Scheduler implements Runnable { } /** - * Requests a graceful shutdown of the worker, notifying record processors, that implement - * {@link ShutdownNotificationAware}, of the impending shutdown. This gives the record processor a final chance to + * Requests a graceful shutdown of the worker, notifying record processors + * of the impending shutdown. This gives the record processor a final chance to * checkpoint. * * This will only create a single shutdown future. Additional attempts to start a graceful shutdown will return the @@ -697,8 +803,8 @@ public class Scheduler implements Runnable { public CompletableFuture startGracefulShutdown() { synchronized (this) { if (gracefulShutdownFuture == null) { - gracefulShutdownFuture = gracefulShutdownCoordinator - .startGracefulShutdown(createGracefulShutdownCallable()); + gracefulShutdownFuture = + gracefulShutdownCoordinator.startGracefulShutdown(createGracefulShutdownCallable()); } } return gracefulShutdownFuture; @@ -746,13 +852,15 @@ public class Scheduler implements Runnable { // If there are no leases notification is already completed, but we still need to shutdown the worker. 
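The graceful-shutdown callable being rebuilt here coordinates three latches: one count per lease for the shutdown notification (the record processor's last chance to checkpoint), one count per lease for consumer shutdown, and a single-count final latch released by finalShutdown(). The handshake, extracted into a runnable sketch with plain Runnables standing in for shard consumers (timing and threading details are simplifications):

import java.util.List;
import java.util.concurrent.CountDownLatch;

class ShutdownLatchSketch {
    static void simulate(List<Runnable> consumers) throws InterruptedException {
        final CountDownLatch notificationCompleteLatch = new CountDownLatch(consumers.size());
        final CountDownLatch shutdownCompleteLatch = new CountDownLatch(consumers.size());
        final CountDownLatch finalShutdownLatch = new CountDownLatch(1);

        for (Runnable consumer : consumers) {
            new Thread(() -> {
                consumer.run();                      // notify processor; final checkpoint chance
                notificationCompleteLatch.countDown();
                shutdownCompleteLatch.countDown();   // consumer fully stopped
            }).start();
        }
        notificationCompleteLatch.await();
        shutdownCompleteLatch.await();
        finalShutdownLatch.countDown();              // stands in for finalShutdown()
        finalShutdownLatch.await();                  // what the coordinator waits on
    }
}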
// this.shutdown(); - return GracefulShutdownContext.SHUTDOWN_ALREADY_COMPLETED; + return GracefulShutdownContext.builder() + .finalShutdownLatch(finalShutdownLatch) + .build(); } CountDownLatch shutdownCompleteLatch = new CountDownLatch(leases.size()); CountDownLatch notificationCompleteLatch = new CountDownLatch(leases.size()); for (Lease lease : leases) { - ShutdownNotification shutdownNotification = new ShardConsumerShutdownNotification(leaseCoordinator, - lease, notificationCompleteLatch, shutdownCompleteLatch); + ShutdownNotification shutdownNotification = new ShardConsumerShutdownNotification( + leaseCoordinator, lease, notificationCompleteLatch, shutdownCompleteLatch); ShardInfo shardInfo = DynamoDBLeaseCoordinator.convertLeaseToAssignment(lease); ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); if (consumer != null) { @@ -767,7 +875,12 @@ public class Scheduler implements Runnable { shutdownCompleteLatch.countDown(); } } - return new GracefulShutdownContext(shutdownCompleteLatch, notificationCompleteLatch, this); + return GracefulShutdownContext.builder() + .shutdownCompleteLatch(shutdownCompleteLatch) + .notificationCompleteLatch(notificationCompleteLatch) + .finalShutdownLatch(finalShutdownLatch) + .scheduler(this) + .build(); }; } @@ -827,6 +940,7 @@ public class Scheduler implements Runnable { ((CloudWatchMetricsFactory) metricsFactory).shutdown(); } shutdownComplete = true; + finalShutdownLatch.countDown(); } private List getShardInfoForAssignments() { @@ -860,9 +974,10 @@ public class Scheduler implements Runnable { * Kinesis shard info * @return ShardConsumer for the shard */ - ShardConsumer createOrGetShardConsumer(@NonNull final ShardInfo shardInfo, - @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, - @NonNull final LeaseCleanupManager leaseCleanupManager) { + ShardConsumer createOrGetShardConsumer( + @NonNull final ShardInfo shardInfo, + @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, + @NonNull final LeaseCleanupManager leaseCleanupManager) { ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); // Instantiate a new consumer if we don't have one, or the one we // had was from an earlier @@ -879,28 +994,34 @@ public class Scheduler implements Runnable { } private ShardSyncTaskManager createOrGetShardSyncTaskManager(StreamConfig streamConfig) { - return streamToShardSyncTaskManagerMap.computeIfAbsent(streamConfig, s -> shardSyncTaskManagerProvider.apply(s)); + return streamToShardSyncTaskManagerMap.computeIfAbsent( + streamConfig, s -> shardSyncTaskManagerProvider.apply(s)); } - protected ShardConsumer buildConsumer(@NonNull final ShardInfo shardInfo, - @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, - @NonNull final LeaseCleanupManager leaseCleanupManager) { - ShardRecordProcessorCheckpointer checkpointer = coordinatorConfig.coordinatorFactory().createRecordProcessorCheckpointer(shardInfo, - checkpoint); + protected ShardConsumer buildConsumer( + @NonNull final ShardInfo shardInfo, + @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, + @NonNull final LeaseCleanupManager leaseCleanupManager) { + ShardRecordProcessorCheckpointer checkpointer = + coordinatorConfig.coordinatorFactory().createRecordProcessorCheckpointer(shardInfo, checkpoint); // The only case where streamName is not available will be when multistreamtracker not set. In this case, // get the default stream name for the single stream application. 
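When buildConsumer (continued below) meets a shard whose stream is missing from currentStreamConfigMap, it creates an orphan StreamConfig and, in multi-stream mode, fills in the stream ARN via ArnUtil.constructStreamArn using the Kinesis client's region. That helper's body is not part of this diff; presumably it assembles the ARN along these lines, using the AWS SDK's Arn builder:

import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;

final class StreamArnSketch {
    // Assumed shape of ArnUtil.constructStreamArn: a Kinesis stream ARN of the
    // form arn:aws:kinesis:<region>:<account>:stream/<name>.
    static Arn constructStreamArn(Region region, String accountId, String streamName) {
        return Arn.builder()
                .partition("aws")
                .service("kinesis")
                .region(region.id())
                .accountId(accountId)
                .resource("stream/" + streamName)
                .build();
    }
}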
final StreamIdentifier streamIdentifier = getStreamIdentifier(shardInfo.streamIdentifierSerOpt()); // Irrespective of single stream app or multi stream app, streamConfig should always be available. - // If we have a shardInfo, that is not present in currentStreamConfigMap for whatever reason, then return default stream config + // If we have a shardInfo, that is not present in currentStreamConfigMap for whatever reason, then return + // default stream config // to gracefully complete the reading. StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier); if (streamConfig == null) { - streamConfig = streamTracker.createStreamConfig(streamIdentifier); + streamConfig = withStreamArn(streamTracker.createStreamConfig(streamIdentifier), getKinesisRegion()); + log.info("Created orphan {}", streamConfig); } Validate.notNull(streamConfig, "StreamConfig should not be null"); - RecordsPublisher cache = retrievalConfig.retrievalFactory().createGetRecordsCache(shardInfo, streamConfig, metricsFactory); - ShardConsumerArgument argument = new ShardConsumerArgument(shardInfo, + RecordsPublisher cache = + retrievalConfig.retrievalFactory().createGetRecordsCache(shardInfo, streamConfig, metricsFactory); + ShardConsumerArgument argument = new ShardConsumerArgument( + shardInfo, streamConfig.streamIdentifier(), leaseCoordinator, executorService, @@ -923,10 +1044,15 @@ public class Scheduler implements Runnable { hierarchicalShardSyncerProvider.apply(streamConfig), metricsFactory, leaseCleanupManager, - schemaRegistryDecoder - ); - return new ShardConsumer(cache, executorService, shardInfo, lifecycleConfig.logWarningForTaskAfterMillis(), - argument, lifecycleConfig.taskExecutionListener(), lifecycleConfig.readTimeoutsToIgnoreBeforeWarning()); + schemaRegistryDecoder); + return new ShardConsumer( + cache, + executorService, + shardInfo, + lifecycleConfig.logWarningForTaskAfterMillis(), + argument, + lifecycleConfig.taskExecutionListener(), + lifecycleConfig.readTimeoutsToIgnoreBeforeWarning()); } /** @@ -960,16 +1086,16 @@ public class Scheduler implements Runnable { */ private void registerErrorHandlerForUndeliverableAsyncTaskExceptions() { RxJavaPlugins.setErrorHandler(t -> { - ExecutorStateEvent executorStateEvent = diagnosticEventFactory.executorStateEvent(executorService, - leaseCoordinator); + ExecutorStateEvent executorStateEvent = + diagnosticEventFactory.executorStateEvent(executorService, leaseCoordinator); RejectedTaskEvent rejectedTaskEvent = diagnosticEventFactory.rejectedTaskEvent(executorStateEvent, t); rejectedTaskEvent.accept(diagnosticEventHandler); }); } private void logExecutorState() { - ExecutorStateEvent executorStateEvent = diagnosticEventFactory.executorStateEvent(executorService, - leaseCoordinator); + ExecutorStateEvent executorStateEvent = + diagnosticEventFactory.executorStateEvent(executorService, leaseCoordinator); executorStateEvent.accept(diagnosticEventHandler); } @@ -979,12 +1105,86 @@ public class Scheduler implements Runnable { streamIdentifier = StreamIdentifier.multiStreamInstance(streamIdentifierString.get()); } else { Validate.isTrue(!isMultiStreamMode, "Should not be in MultiStream Mode"); - streamIdentifier = this.currentStreamConfigMap.values().iterator().next().streamIdentifier(); + streamIdentifier = + this.currentStreamConfigMap.values().iterator().next().streamIdentifier(); } Validate.notNull(streamIdentifier, "Stream identifier should not be empty"); return streamIdentifier; } + private Region getKinesisRegion() { + return 
retrievalConfig.kinesisClient().serviceClientConfiguration().region(); + } + + /** + * Create and return a copy of a {@link StreamConfig} object + * with {@link StreamIdentifier#streamArnOptional()} populated. + * Only to be used in multi-stream mode. + * + * @param streamConfig The {@link StreamConfig} object to return a copy of. + * @param kinesisRegion The {@link Region} the stream exists in, to be used for constructing the {@link Arn}. + * @return A copy of the {@link StreamConfig} with {@link StreamIdentifier#streamArnOptional()} populated. + */ + private static StreamConfig withStreamArn( + @NonNull final StreamConfig streamConfig, @NonNull final Region kinesisRegion) { + Validate.isTrue( + streamConfig.streamIdentifier().accountIdOptional().isPresent(), "accountId should not be empty"); + Validate.isTrue( + streamConfig.streamIdentifier().streamCreationEpochOptional().isPresent(), + "streamCreationEpoch should not be empty"); + + log.info( + "Constructing stream ARN for {} using the Kinesis client's configured region - {}.", + streamConfig.streamIdentifier(), + kinesisRegion); + + final StreamIdentifier streamIdentifierWithArn = StreamIdentifier.multiStreamInstance( + constructStreamArn( + kinesisRegion, + streamConfig.streamIdentifier().accountIdOptional().get(), + streamConfig.streamIdentifier().streamName()), + streamConfig.streamIdentifier().streamCreationEpochOptional().get()); + + return new StreamConfig( + streamIdentifierWithArn, streamConfig.initialPositionInStreamExtended(), streamConfig.consumerArn()); + } + + @RequiredArgsConstructor + private class StreamConfigMap extends ConcurrentHashMap { + /** + * If {@link StreamIdentifier#streamArnOptional()} is present for the provided + * {@link StreamConfig#streamIdentifier()}, validates that the region in the stream ARN is consistent with the + * region that the Kinesis client ({@link RetrievalConfig#kinesisClient()}) is configured with. + *
<p>
    + * In multi-stream mode, ensures stream ARN is always present by constructing it using the Kinesis client + * region when {@link StreamIdentifier#streamArnOptional()} is {@link Optional#empty()}. * <p>
    + * {@inheritDoc} + */ + @Override + public StreamConfig put( + @NonNull final StreamIdentifier streamIdentifier, @NonNull final StreamConfig streamConfig) { + final Region kinesisRegion = getKinesisRegion(); + + return super.put( + streamIdentifier, + streamConfig + .streamIdentifier() + .streamArnOptional() + .map(streamArn -> { + Validate.isTrue( + kinesisRegion + .id() + .equals(streamArn.region().get()), + "The provided streamARN " + streamArn + + " does not match the Kinesis client's configured region - " + + kinesisRegion); + return streamConfig; + }) + .orElse(isMultiStreamMode ? withStreamArn(streamConfig, kinesisRegion) : streamConfig)); + } + } + /** * Logger for suppressing too much INFO logging. To avoid too much logging information Worker will output logging at * INFO level for a single pass through the main loop every minute. At DEBUG level it will output all INFO logs on @@ -993,7 +1193,7 @@ public class Scheduler implements Runnable { @NoArgsConstructor(access = AccessLevel.PRIVATE) private static class SchedulerLog { - private long reportIntervalMillis = TimeUnit.MINUTES.toMillis(1); + private final long reportIntervalMillis = TimeUnit.MINUTES.toMillis(1); private long nextReportTime = System.currentTimeMillis() + reportIntervalMillis; private boolean infoReporting; @@ -1014,7 +1214,7 @@ public class Scheduler implements Runnable { private void resetInfoLogging() { if (infoReporting) { // We just logged at INFO level for a pass through worker loop - if (log.isInfoEnabled()) { + if (!log.isDebugEnabled() && !log.isTraceEnabled()) { infoReporting = false; nextReportTime = System.currentTimeMillis() + reportIntervalMillis; } // else is DEBUG or TRACE so leave reporting true diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java index d138b84f..de17542f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java @@ -22,7 +22,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.Data; import lombok.NonNull; import software.amazon.kinesis.annotations.KinesisClientInternalApi; @@ -41,15 +40,16 @@ public class SchedulerCoordinatorFactory implements CoordinatorFactory { */ @Override public ExecutorService createExecutorService() { - return new SchedulerThreadPoolExecutor( - new ThreadFactoryBuilder().setNameFormat("ShardRecordProcessor-%04d").build()); + return new SchedulerThreadPoolExecutor(new ThreadFactoryBuilder() + .setNameFormat("ShardRecordProcessor-%04d") + .build()); } static class SchedulerThreadPoolExecutor extends ThreadPoolExecutor { private static final long DEFAULT_KEEP_ALIVE = 60L; + SchedulerThreadPoolExecutor(ThreadFactory threadFactory) { - super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, new SynchronousQueue<>(), - threadFactory); + super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, new SynchronousQueue<>(), threadFactory); } } @@ -57,8 +57,8 @@ public class SchedulerCoordinatorFactory implements CoordinatorFactory { * {@inheritDoc} */ @Override - public ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer(@NonNull final ShardInfo shardInfo, - @NonNull final 
Checkpointer checkpoint) { + public ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer( + @NonNull final ShardInfo shardInfo, @NonNull final Checkpointer checkpoint) { return new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java index ddce2a10..bc2a1b1c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java @@ -19,16 +19,15 @@ package software.amazon.kinesis.coordinator; */ @FunctionalInterface public interface WorkerStateChangeListener { - enum WorkerState { - CREATED, - INITIALIZING, - STARTED, - SHUT_DOWN_STARTED, - SHUT_DOWN - } + enum WorkerState { + CREATED, + INITIALIZING, + STARTED, + SHUT_DOWN_STARTED, + SHUT_DOWN + } - void onWorkerStateChange(WorkerState newState); + void onWorkerStateChange(WorkerState newState); - default void onAllInitializationAttemptsFailed(Throwable e) { - } + default void onAllInitializationAttemptsFailed(Throwable e) {} } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java index 97e9209d..6f2fa3a8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java @@ -19,16 +19,16 @@ package software.amazon.kinesis.exceptions; * is not found). */ public class InvalidStateException extends KinesisClientLibNonRetryableException { - + private static final long serialVersionUID = 1L; - + /** * @param message provides more details about the cause and potential ways to debug/address. */ public InvalidStateException(String message) { super(message); } - + /** * @param message provides more details about the cause and potential ways to debug/address. * @param e Cause of the exception @@ -36,5 +36,4 @@ public class InvalidStateException extends KinesisClientLibNonRetryableException public InvalidStateException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java index 6c7c295a..1f2092cb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java @@ -15,21 +15,21 @@ package software.amazon.kinesis.exceptions; /** - * This is thrown when the Amazon Kinesis Client Library encounters issues talking to its dependencies + * This is thrown when the Amazon Kinesis Client Library encounters issues talking to its dependencies * (e.g. fetching data from Kinesis, DynamoDB table reads/writes, emitting metrics to CloudWatch). 
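A note on the exception hierarchy whose files are reformatted below: the retryable exceptions extend RuntimeException, while the non-retryable ones form a checked hierarchy under KinesisClientLibException. That split supports caller code like the following hedged sketch, which retries only the retryable branch (backoff policy is illustrative, not prescribed by the library):

import java.util.concurrent.Callable;

import software.amazon.kinesis.exceptions.KinesisClientLibRetryableException;

class RetrySketch {
    // Retryable KCL exceptions (e.g. ThrottlingException) may be retried with
    // backoff; anything else, including the non-retryable checked hierarchy,
    // propagates immediately.
    static <T> T withRetries(Callable<T> operation, int maxAttempts) throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                return operation.call();
            } catch (KinesisClientLibRetryableException e) {
                if (attempt >= maxAttempts) {
                    throw e;
                }
                Thread.sleep(100L * attempt); // simple linear backoff (illustrative)
            }
        }
    }
}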
- * + * */ public class KinesisClientLibDependencyException extends KinesisClientLibRetryableException { - + private static final long serialVersionUID = 1L; - + /** * @param message provides more details about the cause and potential ways to debug/address. */ public KinesisClientLibDependencyException(String message) { super(message); } - + /** * @param message provides more details about the cause and potential ways to debug/address. * @param e Cause of the exception @@ -37,5 +37,4 @@ public class KinesisClientLibDependencyException extends KinesisClientLibRetryab public KinesisClientLibDependencyException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java index a7b2e173..0da75474 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java @@ -25,7 +25,7 @@ public abstract class KinesisClientLibException extends Exception { /** * Constructor. - * + * * @param message Message of with details of the exception. */ public KinesisClientLibException(String message) { @@ -34,12 +34,11 @@ public abstract class KinesisClientLibException extends Exception { /** * Constructor. - * + * * @param message Message with details of the exception. * @param cause Cause. */ public KinesisClientLibException(String message, Throwable cause) { super(message, cause); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java index 49f4bf6b..b538f048 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java @@ -16,7 +16,7 @@ package software.amazon.kinesis.exceptions; /** * Non-retryable exceptions. Simply retrying the same request/operation is not expected to succeed. - * + * */ public abstract class KinesisClientLibNonRetryableException extends KinesisClientLibException { @@ -24,7 +24,7 @@ public abstract class KinesisClientLibNonRetryableException extends KinesisClien /** * Constructor. - * + * * @param message Message. */ public KinesisClientLibNonRetryableException(String message) { @@ -33,7 +33,7 @@ public abstract class KinesisClientLibNonRetryableException extends KinesisClien /** * Constructor. - * + * * @param message Message. * @param e Cause. */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java index 72e9b189..35d0782e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java @@ -22,7 +22,7 @@ public abstract class KinesisClientLibRetryableException extends RuntimeExceptio /** * Constructor. - * + * * @param message Message with details about the exception. 
*/ public KinesisClientLibRetryableException(String message) { @@ -31,7 +31,7 @@ public abstract class KinesisClientLibRetryableException extends RuntimeExceptio /** * Constructor. - * + * * @param message Message with details about the exception. * @param e Cause. */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java index 6d7fafc0..0a530f57 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java @@ -35,5 +35,4 @@ public class ShutdownException extends KinesisClientLibNonRetryableException { public ShutdownException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java index addfa58b..ef951ef6 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java @@ -35,5 +35,4 @@ public class ThrottlingException extends KinesisClientLibRetryableException { public ThrottlingException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java index 9b1db062..db979b3d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java @@ -27,7 +27,7 @@ public class BlockedOnParentShardException extends KinesisClientLibRetryableExce /** * Constructor. - * + * * @param message Error message. */ public BlockedOnParentShardException(String message) { @@ -36,12 +36,11 @@ public class BlockedOnParentShardException extends KinesisClientLibRetryableExce /** * Constructor. - * + * * @param message Error message. * @param e Cause of the exception. */ public BlockedOnParentShardException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java index 95495013..02f9c1a4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java @@ -25,7 +25,7 @@ public class KinesisClientLibIOException extends KinesisClientLibRetryableExcept /** * Constructor. - * + * * @param message Error message. */ public KinesisClientLibIOException(String message) { @@ -34,7 +34,7 @@ public class KinesisClientLibIOException extends KinesisClientLibRetryableExcept /** * Constructor. - * + * * @param message Error message. * @param e Cause. 
*/ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java index 29d6029b..34b13f64 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java @@ -14,15 +14,15 @@ */ package software.amazon.kinesis.leases; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; - import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.kinesis.annotations.KinesisClientInternalApi; + /** * Static utility functions used by our LeaseSerializers. */ @@ -42,7 +42,9 @@ public class DynamoUtils { throw new IllegalArgumentException("Byte buffer attributeValues cannot be null or empty."); } - return AttributeValue.builder().b(SdkBytes.fromByteArray(byteBufferValue)).build(); + return AttributeValue.builder() + .b(SdkBytes.fromByteArray(byteBufferValue)) + .build(); } public static AttributeValue createAttributeValue(String stringValue) { @@ -97,5 +99,4 @@ public class DynamoUtils { return av.ss(); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java index e44125a5..053dc2a6 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java @@ -17,6 +17,7 @@ package software.amazon.kinesis.leases; import java.io.Serializable; import java.math.BigInteger; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -24,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -32,13 +32,13 @@ import com.google.common.annotations.VisibleForTesting; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; -import lombok.experimental.Accessors; -import org.apache.commons.lang3.StringUtils; - import lombok.NonNull; import lombok.RequiredArgsConstructor; +import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.services.kinesis.model.ChildShard; +import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.services.kinesis.model.Shard; import software.amazon.awssdk.services.kinesis.model.ShardFilter; import software.amazon.awssdk.services.kinesis.model.ShardFilterType; @@ -47,6 +47,7 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.coordinator.DeletedStreamListProvider; import 
software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.leases.exceptions.InvalidStateException; @@ -56,6 +57,7 @@ import software.amazon.kinesis.metrics.MetricsScope; import software.amazon.kinesis.metrics.MetricsUtil; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static java.util.Objects.nonNull; import static software.amazon.kinesis.common.HashKeyRangeForLease.fromHashKeyRange; /** @@ -72,27 +74,35 @@ public class HierarchicalShardSyncer { private final String streamIdentifier; + private final DeletedStreamListProvider deletedStreamListProvider; + private static final String MIN_HASH_KEY = BigInteger.ZERO.toString(); - private static final String MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE).toString(); - private static final int retriesForCompleteHashRange = 3; + private static final String MAX_HASH_KEY = + new BigInteger("2").pow(128).subtract(BigInteger.ONE).toString(); + private static final int RETRIES_FOR_COMPLETE_HASH_RANGE = 3; private static final long DELAY_BETWEEN_LIST_SHARDS_MILLIS = 1000; public HierarchicalShardSyncer() { - isMultiStreamMode = false; - streamIdentifier = "SingleStreamMode"; + this(false, "SingleStreamMode"); } public HierarchicalShardSyncer(final boolean isMultiStreamMode, final String streamIdentifier) { - this.isMultiStreamMode = isMultiStreamMode; - this.streamIdentifier = streamIdentifier; + this(isMultiStreamMode, streamIdentifier, null); } - private static final BiFunction shardIdFromLeaseDeducer = - (lease, multiStreamArgs) -> - multiStreamArgs.isMultiStreamMode() ? - ((MultiStreamLease) lease).shardId() : - lease.leaseKey(); + public HierarchicalShardSyncer( + final boolean isMultiStreamMode, + final String streamIdentifier, + final DeletedStreamListProvider deletedStreamListProvider) { + this.isMultiStreamMode = isMultiStreamMode; + this.streamIdentifier = streamIdentifier; + this.deletedStreamListProvider = deletedStreamListProvider; + } + + private static String getShardIdFromLease(Lease lease, MultiStreamArgs multiStreamArgs) { + return multiStreamArgs.isMultiStreamMode() ? ((MultiStreamLease) lease).shardId() : lease.leaseKey(); + } /** * Check and create leases for any new shards (e.g. following a reshard operation). Sync leases with Kinesis shards @@ -109,24 +119,41 @@ public class HierarchicalShardSyncer { * @throws ProvisionedThroughputException * @throws KinesisClientLibIOException */ - public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - final MetricsScope scope, final boolean ignoreUnexpectedChildShards, final boolean isLeaseTableEmpty) - throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException, InterruptedException { - final List latestShards = isLeaseTableEmpty ? 
- getShardListAtInitialPosition(shardDetector, initialPosition) : getShardList(shardDetector); - return checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, initialPosition, latestShards, ignoreUnexpectedChildShards, scope, + public synchronized boolean checkAndCreateLeaseForNewShards( + @NonNull final ShardDetector shardDetector, + final LeaseRefresher leaseRefresher, + final InitialPositionInStreamExtended initialPosition, + final MetricsScope scope, + final boolean ignoreUnexpectedChildShards, + final boolean isLeaseTableEmpty) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, + KinesisClientLibIOException, InterruptedException { + final List latestShards = isLeaseTableEmpty + ? getShardListAtInitialPosition(shardDetector, initialPosition) + : getShardList(shardDetector); + return checkAndCreateLeaseForNewShards( + shardDetector, + leaseRefresher, + initialPosition, + latestShards, + ignoreUnexpectedChildShards, + scope, isLeaseTableEmpty); } - //Provide a pre-collcted list of shards to avoid calling ListShards API - public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - List latestShards, final boolean ignoreUnexpectedChildShards, final MetricsScope scope, final boolean isLeaseTableEmpty) - throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { - - //TODO: Need to add multistream support for this https://sim.amazon.com/issues/KinesisLTR-191 - + /** + * Provide a pre-collected list of shards to avoid calling ListShards API + */ + public synchronized boolean checkAndCreateLeaseForNewShards( + @NonNull final ShardDetector shardDetector, + final LeaseRefresher leaseRefresher, + final InitialPositionInStreamExtended initialPosition, + List latestShards, + final boolean ignoreUnexpectedChildShards, + final MetricsScope scope, + final boolean isLeaseTableEmpty) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, + KinesisClientLibIOException { if (!CollectionUtils.isNullOrEmpty(latestShards)) { log.debug("{} - Num shards: {}", streamIdentifier, latestShards.size()); } else { @@ -135,39 +162,45 @@ public class HierarchicalShardSyncer { } final Map shardIdToShardMap = constructShardIdToShardMap(latestShards); - final Map> shardIdToChildShardIdsMap = constructShardIdToChildShardIdsMap( - shardIdToShardMap); + final Map> shardIdToChildShardIdsMap = + constructShardIdToChildShardIdsMap(shardIdToShardMap); final Set inconsistentShardIds = findInconsistentShardIds(shardIdToChildShardIdsMap, shardIdToShardMap); if (!ignoreUnexpectedChildShards) { assertAllParentShardsAreClosed(inconsistentShardIds); } - final List currentLeases = isMultiStreamMode ? - leaseRefresher.listLeasesForStream(shardDetector.streamIdentifier()) : leaseRefresher.listLeases(); - final MultiStreamArgs multiStreamArgs = new MultiStreamArgs(isMultiStreamMode, shardDetector.streamIdentifier()); - final LeaseSynchronizer leaseSynchronizer = isLeaseTableEmpty ? new EmptyLeaseTableSynchronizer() : - new NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap); - final List newLeasesToCreate = determineNewLeasesToCreate(leaseSynchronizer, latestShards, currentLeases, - initialPosition, inconsistentShardIds, multiStreamArgs); + final List currentLeases = isMultiStreamMode + ? 
leaseRefresher.listLeasesForStream(shardDetector.streamIdentifier()) + : leaseRefresher.listLeases(); + final MultiStreamArgs multiStreamArgs = + new MultiStreamArgs(isMultiStreamMode, shardDetector.streamIdentifier()); + final LeaseSynchronizer leaseSynchronizer = isLeaseTableEmpty + ? new EmptyLeaseTableSynchronizer() + : new NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap); + final List newLeasesToCreate = determineNewLeasesToCreate( + leaseSynchronizer, latestShards, currentLeases, initialPosition, inconsistentShardIds, multiStreamArgs); log.info("{} - Number of new leases to create: {}", streamIdentifier, newLeasesToCreate.size()); final Set createdLeases = new HashSet<>(); for (Lease lease : newLeasesToCreate) { - long startTime = System.currentTimeMillis(); + final long startTime = System.currentTimeMillis(); boolean success = false; try { - if(leaseRefresher.createLeaseIfNotExists(lease)) { + if (leaseRefresher.createLeaseIfNotExists(lease)) { createdLeases.add(lease); } success = true; - } - finally { + } finally { MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED); + if (lease.checkpoint() != null) { + final String metricName = lease.checkpoint().isSentinelCheckpoint() + ? lease.checkpoint().sequenceNumber() + : "SEQUENCE_NUMBER"; + MetricsUtil.addSuccess(scope, "CreateLease_" + metricName, true, MetricsLevel.DETAILED); + } } } log.info("{} - Newly created leases {}: {}", streamIdentifier, createdLeases.size(), createdLeases); - final List trackedLeases = new ArrayList<>(currentLeases); - trackedLeases.addAll(newLeasesToCreate); return true; } @@ -177,11 +210,13 @@ public class HierarchicalShardSyncer { * @throws KinesisClientLibIOException */ private static void assertAllParentShardsAreClosed(final Set inconsistentShardIds) - throws KinesisClientLibIOException { + throws KinesisClientLibIOException { if (!CollectionUtils.isNullOrEmpty(inconsistentShardIds)) { final String ids = StringUtils.join(inconsistentShardIds, ' '); throw new KinesisClientLibIOException(String.format( + // CHECKSTYLE.OFF: LineLength "%d open child shards (%s) are inconsistent. This can happen due to a race condition between describeStream and a reshard operation.", + // CHECKSTYLE.ON: LineLength inconsistentShardIds.size(), ids)); } } @@ -193,12 +228,17 @@ public class HierarchicalShardSyncer { * @param shardIdToShardMap * @return Set of inconsistent open shard ids for shards having open parents. 
*/ - private static Set findInconsistentShardIds(final Map> shardIdToChildShardIdsMap, - final Map shardIdToShardMap) { + private static Set findInconsistentShardIds( + final Map> shardIdToChildShardIdsMap, final Map shardIdToShardMap) { return shardIdToChildShardIdsMap.entrySet().stream() .filter(entry -> entry.getKey() == null - || shardIdToShardMap.get(entry.getKey()).sequenceNumberRange().endingSequenceNumber() == null) - .flatMap(entry -> shardIdToChildShardIdsMap.get(entry.getKey()).stream()).collect(Collectors.toSet()); + || shardIdToShardMap + .get(entry.getKey()) + .sequenceNumberRange() + .endingSequenceNumber() + == null) + .flatMap(entry -> shardIdToChildShardIdsMap.get(entry.getKey()).stream()) + .collect(Collectors.toSet()); } /** @@ -215,15 +255,15 @@ public class HierarchicalShardSyncer { final Shard shard = entry.getValue(); final String parentShardId = shard.parentShardId(); if (parentShardId != null && shardIdToShardMap.containsKey(parentShardId)) { - final Set childShardIds = shardIdToChildShardIdsMap.computeIfAbsent(parentShardId, - key -> new HashSet<>()); + final Set childShardIds = + shardIdToChildShardIdsMap.computeIfAbsent(parentShardId, key -> new HashSet<>()); childShardIds.add(shardId); } final String adjacentParentShardId = shard.adjacentParentShardId(); if (adjacentParentShardId != null && shardIdToShardMap.containsKey(adjacentParentShardId)) { - final Set childShardIds = shardIdToChildShardIdsMap.computeIfAbsent(adjacentParentShardId, - key -> new HashSet<>()); + final Set childShardIds = + shardIdToChildShardIdsMap.computeIfAbsent(adjacentParentShardId, key -> new HashSet<>()); childShardIds.add(shardId); } } @@ -235,7 +275,8 @@ public class HierarchicalShardSyncer { * @param initialPositionInStreamExtended * @return ShardFilter shard filter for the corresponding position in the stream. 
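For the getShardFilterFromInitialPosition hunk that follows, the two cases visible in this diff map initial positions onto ListShards filters roughly as below (a sketch using only the AWS SDK builders shown in the hunk; the ShardFilters class and its method names are hypothetical):

import java.time.Instant;

import software.amazon.awssdk.services.kinesis.model.ShardFilter;
import software.amazon.awssdk.services.kinesis.model.ShardFilterType;

final class ShardFilters {
    // TRIM_HORIZON: match shards that are open at the trim horizon.
    static ShardFilter atTrimHorizon() {
        return ShardFilter.builder().type(ShardFilterType.AT_TRIM_HORIZON).build();
    }

    // AT_TIMESTAMP: match shards that are open at the given point in time.
    static ShardFilter atTimestamp(final Instant timestamp) {
        return ShardFilter.builder()
                .type(ShardFilterType.AT_TIMESTAMP)
                .timestamp(timestamp)
                .build();
    }
}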
*/ - private static ShardFilter getShardFilterFromInitialPosition(InitialPositionInStreamExtended initialPositionInStreamExtended) { + private static ShardFilter getShardFilterFromInitialPosition( + InitialPositionInStreamExtended initialPositionInStreamExtended) { ShardFilter.Builder builder = ShardFilter.builder(); switch (initialPositionInStreamExtended.getInitialPositionInStream()) { @@ -246,26 +287,29 @@ public class HierarchicalShardSyncer { builder = builder.type(ShardFilterType.AT_TRIM_HORIZON); break; case AT_TIMESTAMP: - builder = builder.type(ShardFilterType.AT_TIMESTAMP).timestamp(initialPositionInStreamExtended.getTimestamp().toInstant()); + builder = builder.type(ShardFilterType.AT_TIMESTAMP) + .timestamp( + initialPositionInStreamExtended.getTimestamp().toInstant()); break; } return builder.build(); } - private static List getShardListAtInitialPosition(@NonNull final ShardDetector shardDetector, - InitialPositionInStreamExtended initialPositionInStreamExtended) throws KinesisClientLibIOException, InterruptedException { + private static List getShardListAtInitialPosition( + @NonNull final ShardDetector shardDetector, InitialPositionInStreamExtended initialPositionInStreamExtended) + throws KinesisClientLibIOException, InterruptedException { final ShardFilter shardFilter = getShardFilterFromInitialPosition(initialPositionInStreamExtended); final String streamName = shardDetector.streamIdentifier().streamName(); List shards; - for (int i = 0; i < retriesForCompleteHashRange; i++) { + for (int i = 0; i < RETRIES_FOR_COMPLETE_HASH_RANGE; i++) { shards = shardDetector.listShardsWithFilter(shardFilter); if (shards == null) { - throw new KinesisClientLibIOException( - "Stream " + streamName + " is not in ACTIVE OR UPDATING state - will retry getting the shard list."); + throw new KinesisClientLibIOException("Stream " + streamName + + " is not in ACTIVE OR UPDATING state - will retry getting the shard list."); } if (isHashRangeOfShardsComplete(shards)) { @@ -275,15 +319,25 @@ public class HierarchicalShardSyncer { Thread.sleep(DELAY_BETWEEN_LIST_SHARDS_MILLIS); } - throw new KinesisClientLibIOException("Hash range of shards returned for " + streamName + " was incomplete after " - + retriesForCompleteHashRange + " retries."); + throw new KinesisClientLibIOException("Hash range of shards returned for " + streamName + + " was incomplete after " + RETRIES_FOR_COMPLETE_HASH_RANGE + " retries."); } - private static List getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException { - final Optional> shards = Optional.of(shardDetector.listShards()); + private List getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException { + // Fallback to existing behavior for backward compatibility + List shardList = Collections.emptyList(); + try { + shardList = shardDetector.listShardsWithoutConsumingResourceNotFoundException(); + } catch (ResourceNotFoundException e) { + if (nonNull(this.deletedStreamListProvider) && isMultiStreamMode) { + deletedStreamListProvider.add(StreamIdentifier.multiStreamInstance(streamIdentifier)); + } + } + final Optional> shards = Optional.of(shardList); - return shards.orElseThrow(() -> new KinesisClientLibIOException("Stream " + shardDetector.streamIdentifier().streamName() + - " is not in ACTIVE OR UPDATING state - will retry getting the shard list.")); + return shards.orElseThrow(() -> new KinesisClientLibIOException( + "Stream " + shardDetector.streamIdentifier().streamName() + + " is not in ACTIVE OR 
UPDATING state - will retry getting the shard list.")); } private static boolean isHashRangeOfShardsComplete(@NonNull List shards) { @@ -294,8 +348,8 @@ public class HierarchicalShardSyncer { final Comparator shardStartingHashKeyBasedComparator = new ShardStartingHashKeyBasedComparator(); shards.sort(shardStartingHashKeyBasedComparator); - if (!shards.get(0).hashKeyRange().startingHashKey().equals(MIN_HASH_KEY) || - !shards.get(shards.size() - 1).hashKeyRange().endingHashKey().equals(MAX_HASH_KEY)) { + if (!shards.get(0).hashKeyRange().startingHashKey().equals(MIN_HASH_KEY) + || !shards.get(shards.size() - 1).hashKeyRange().endingHashKey().equals(MAX_HASH_KEY)) { return false; } @@ -303,11 +357,16 @@ public class HierarchicalShardSyncer { for (int i = 1; i < shards.size(); i++) { final Shard shardAtStartOfPossibleHole = shards.get(i - 1); final Shard shardAtEndOfPossibleHole = shards.get(i); - final BigInteger startOfPossibleHole = new BigInteger(shardAtStartOfPossibleHole.hashKeyRange().endingHashKey()); - final BigInteger endOfPossibleHole = new BigInteger(shardAtEndOfPossibleHole.hashKeyRange().startingHashKey()); + final BigInteger startOfPossibleHole = + new BigInteger(shardAtStartOfPossibleHole.hashKeyRange().endingHashKey()); + final BigInteger endOfPossibleHole = + new BigInteger(shardAtEndOfPossibleHole.hashKeyRange().startingHashKey()); if (!endOfPossibleHole.subtract(startOfPossibleHole).equals(BigInteger.ONE)) { - log.error("Incomplete hash range found between {} and {}.", shardAtStartOfPossibleHole, shardAtEndOfPossibleHole); + log.error( + "Incomplete hash range found between {} and {}.", + shardAtStartOfPossibleHole, + shardAtEndOfPossibleHole); return false; } } @@ -329,10 +388,15 @@ public class HierarchicalShardSyncer { * @param multiStreamArgs determines if we are using multistream mode. * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ - static List determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List shards, - final List currentLeases, final InitialPositionInStreamExtended initialPosition, - final Set inconsistentShardIds, final MultiStreamArgs multiStreamArgs) { - return leaseSynchronizer.determineNewLeasesToCreate(shards, currentLeases, initialPosition, inconsistentShardIds, multiStreamArgs); + static List determineNewLeasesToCreate( + final LeaseSynchronizer leaseSynchronizer, + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition, + final Set inconsistentShardIds, + final MultiStreamArgs multiStreamArgs) { + return leaseSynchronizer.determineNewLeasesToCreate( + shards, currentLeases, initialPosition, inconsistentShardIds, multiStreamArgs); } /** @@ -347,9 +411,18 @@ public class HierarchicalShardSyncer { * @param inconsistentShardIds Set of child shard ids having open parents. 
* @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ - static List determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List shards, - final List currentLeases, final InitialPositionInStreamExtended initialPosition,final Set inconsistentShardIds) { - return determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds, + static List determineNewLeasesToCreate( + final LeaseSynchronizer leaseSynchronizer, + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition, + final Set inconsistentShardIds) { + return determineNewLeasesToCreate( + leaseSynchronizer, + shards, + currentLeases, + initialPosition, + inconsistentShardIds, new MultiStreamArgs(false, null)); } @@ -364,10 +437,14 @@ public class HierarchicalShardSyncer { * location in the shard (when an application starts up for the first time - and there are no checkpoints). * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ - static List determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List shards, - final List currentLeases, final InitialPositionInStreamExtended initialPosition) { + static List determineNewLeasesToCreate( + final LeaseSynchronizer leaseSynchronizer, + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition) { final Set inconsistentShardIds = new HashSet<>(); - return determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds); + return determineNewLeasesToCreate( + leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds); } /** @@ -385,10 +462,13 @@ public class HierarchicalShardSyncer { * @param memoizationContext Memoization of shards that have been evaluated as part of the evaluation * @return true if the shard is a descendant of any current shard (lease already exists) */ - static boolean checkIfDescendantAndAddNewLeasesForAncestors(final String shardId, - final InitialPositionInStreamExtended initialPosition, final Set shardIdsOfCurrentLeases, + static boolean checkIfDescendantAndAddNewLeasesForAncestors( + final String shardId, + final InitialPositionInStreamExtended initialPosition, + final Set shardIdsOfCurrentLeases, final Map shardIdToShardMapOfAllKinesisShards, - final Map shardIdToLeaseMapOfNewShards, final MemoizationContext memoizationContext, + final Map shardIdToLeaseMapOfNewShards, + final MemoizationContext memoizationContext, final MultiStreamArgs multiStreamArgs) { final String streamIdentifier = getStreamIdentifier(multiStreamArgs); final Boolean previousValue = memoizationContext.isDescendant(shardId); @@ -405,7 +485,10 @@ public class HierarchicalShardSyncer { isDescendant = true; // We don't need to add leases of its ancestors, // because we'd have done it when creating a lease for this shard. - log.debug("{} - Shard {} is a descendant shard of an existing shard. Skipping lease creation", streamIdentifier, shardId); + log.debug( + "{} - Shard {} is a descendant shard of an existing shard. Skipping lease creation", + streamIdentifier, + shardId); } else { final Shard shard = shardIdToShardMapOfAllKinesisShards.get(shardId); @@ -414,9 +497,14 @@ public class HierarchicalShardSyncer { // Check if the parent is a descendant, and include its ancestors. Or, if the parent is NOT a // descendant but we should create a lease for it anyway (e.g. 
to include in processing from // TRIM_HORIZON or AT_TIMESTAMP). If either is true, then we mark the current shard as a descendant. - final boolean isParentDescendant = checkIfDescendantAndAddNewLeasesForAncestors(parentShardId, - initialPosition, shardIdsOfCurrentLeases, shardIdToShardMapOfAllKinesisShards, - shardIdToLeaseMapOfNewShards, memoizationContext, multiStreamArgs); + final boolean isParentDescendant = checkIfDescendantAndAddNewLeasesForAncestors( + parentShardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToLeaseMapOfNewShards, + memoizationContext, + multiStreamArgs); if (isParentDescendant || memoizationContext.shouldCreateLease(parentShardId)) { isDescendant = true; descendantParentShardIds.add(parentShardId); @@ -443,13 +531,17 @@ public class HierarchicalShardSyncer { * therefore covered in the lease table). So we should create a lease for the parent. */ if (lease == null) { - if (memoizationContext.shouldCreateLease(parentShardId) || - !descendantParentShardIds.contains(parentShardId)) { - log.debug("{} : Need to create a lease for shardId {}", streamIdentifier, parentShardId); - lease = multiStreamArgs.isMultiStreamMode() ? - newKCLMultiStreamLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId), - multiStreamArgs.streamIdentifier()) : - newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId)); + if (memoizationContext.shouldCreateLease(parentShardId) + || !descendantParentShardIds.contains(parentShardId)) { + log.debug( + "{} : Need to create a lease for shardId {}", + streamIdentifier, + parentShardId); + lease = multiStreamArgs.isMultiStreamMode() + ? newKCLMultiStreamLease( + shardIdToShardMapOfAllKinesisShards.get(parentShardId), + multiStreamArgs.streamIdentifier()) + : newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId)); shardIdToLeaseMapOfNewShards.put(parentShardId, lease); } } @@ -480,13 +572,21 @@ public class HierarchicalShardSyncer { */ if (lease != null) { if (descendantParentShardIds.contains(parentShardId) - && !initialPosition.getInitialPositionInStream() - .equals(InitialPositionInStream.AT_TIMESTAMP)) { - log.info("Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}", lease.leaseKey(), lease.checkpoint()); + && !initialPosition + .getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { + log.info( + "Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}", + lease.leaseKey(), + lease.checkpoint()); lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); } else { final ExtendedSequenceNumber newCheckpoint = convertToCheckpoint(initialPosition); - log.info("Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}", lease.leaseKey(), newCheckpoint, lease.checkpoint()); + log.info( + "Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}", + lease.leaseKey(), + newCheckpoint, + lease.checkpoint()); lease.checkpoint(newCheckpoint); } } @@ -498,8 +598,9 @@ public class HierarchicalShardSyncer { // lease just like we do for TRIM_HORIZON. However we will only return back records with server-side // timestamp at or after the specified initial position timestamp. 
if (initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON) - || initialPosition.getInitialPositionInStream() - .equals(InitialPositionInStream.AT_TIMESTAMP)) { + || initialPosition + .getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { memoizationContext.setShouldCreateLease(shardId, true); } } @@ -510,12 +611,20 @@ public class HierarchicalShardSyncer { return isDescendant; } - static boolean checkIfDescendantAndAddNewLeasesForAncestors(final String shardId, - final InitialPositionInStreamExtended initialPosition, final Set shardIdsOfCurrentLeases, + static boolean checkIfDescendantAndAddNewLeasesForAncestors( + final String shardId, + final InitialPositionInStreamExtended initialPosition, + final Set shardIdsOfCurrentLeases, final Map shardIdToShardMapOfAllKinesisShards, - final Map shardIdToLeaseMapOfNewShards, MemoizationContext memoizationContext) { - return checkIfDescendantAndAddNewLeasesForAncestors(shardId, initialPosition, shardIdsOfCurrentLeases, - shardIdToShardMapOfAllKinesisShards, shardIdToLeaseMapOfNewShards, memoizationContext, + final Map shardIdToLeaseMapOfNewShards, + MemoizationContext memoizationContext) { + return checkIfDescendantAndAddNewLeasesForAncestors( + shardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToLeaseMapOfNewShards, + memoizationContext, new MultiStreamArgs(false, null)); } @@ -528,8 +637,8 @@ public class HierarchicalShardSyncer { * @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream. * @return Set of parentShardIds */ - static Set getParentShardIds(final Shard shard, - final Map shardIdToShardMapOfAllKinesisShards) { + static Set getParentShardIds( + final Shard shard, final Map shardIdToShardMapOfAllKinesisShards) { final Set parentShardIds = new HashSet<>(2); final String parentShardId = shard.parentShardId(); if (parentShardId != null && shardIdToShardMapOfAllKinesisShards.containsKey(parentShardId)) { @@ -542,11 +651,13 @@ public class HierarchicalShardSyncer { return parentShardIds; } - public synchronized Lease createLeaseForChildShard(final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException { + public synchronized Lease createLeaseForChildShard( + final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException { final MultiStreamArgs multiStreamArgs = new MultiStreamArgs(isMultiStreamMode, streamIdentifier); - return multiStreamArgs.isMultiStreamMode() ? newKCLMultiStreamLeaseForChildShard(childShard, streamIdentifier) - : newKCLLeaseForChildShard(childShard); + return multiStreamArgs.isMultiStreamMode() + ? 
newKCLMultiStreamLeaseForChildShard(childShard, streamIdentifier) + : newKCLLeaseForChildShard(childShard); } /** @@ -561,7 +672,8 @@ public class HierarchicalShardSyncer { if (!CollectionUtils.isNullOrEmpty(childShard.parentShards())) { newLease.parentShardIds(childShard.parentShards()); } else { - throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId() + "because parent shards cannot be found."); + throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId() + + " because parent shards cannot be found."); } newLease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); newLease.ownerSwitchesSinceCheckpoint(0L); @@ -569,13 +681,15 @@ public class HierarchicalShardSyncer { return newLease; } - private static Lease newKCLMultiStreamLeaseForChildShard(final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException { + private static Lease newKCLMultiStreamLeaseForChildShard( + final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException { MultiStreamLease newLease = new MultiStreamLease(); newLease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), childShard.shardId())); if (!CollectionUtils.isNullOrEmpty(childShard.parentShards())) { newLease.parentShardIds(childShard.parentShards()); } else { - throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId() + "because parent shards cannot be found."); + throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId() + + " because parent shards cannot be found."); } newLease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); newLease.ownerSwitchesSinceCheckpoint(0L); @@ -590,7 +704,6 @@ public class HierarchicalShardSyncer { * Note: Package level access only for testing purposes * * @param shard - * @return */ private static Lease newKCLLease(final Shard shard) { Lease newLease = new Lease(); @@ -644,8 +757,10 @@ public class HierarchicalShardSyncer { * @return List of open shards (shards at the tip of the stream) - may include shards that are not yet active. 
*/ static List getOpenShards(final List allShards, final String streamIdentifier) { - return allShards.stream().filter(shard -> shard.sequenceNumberRange().endingSequenceNumber() == null) - .peek(shard -> log.debug("{} : Found open shard: {}", streamIdentifier, shard.shardId())).collect(Collectors.toList()); + return allShards.stream() + .filter(shard -> shard.sequenceNumberRange().endingSequenceNumber() == null) + .peek(shard -> log.debug("{} : Found open shard: {}", streamIdentifier, shard.shardId())) + .collect(Collectors.toList()); } private static ExtendedSequenceNumber convertToCheckpoint(final InitialPositionInStreamExtended position) { @@ -664,7 +779,8 @@ public class HierarchicalShardSyncer { private static String getStreamIdentifier(MultiStreamArgs multiStreamArgs) { return Optional.ofNullable(multiStreamArgs.streamIdentifier()) - .map(streamId -> streamId.serialize()).orElse("single_stream_mode"); + .map(streamId -> streamId.serialize()) + .orElse("single_stream_mode"); } /** @@ -711,15 +827,17 @@ public class HierarchicalShardSyncer { @Override public int compare(final Lease lease1, final Lease lease2) { int result = 0; - final String shardId1 = shardIdFromLeaseDeducer.apply(lease1, multiStreamArgs); - final String shardId2 = shardIdFromLeaseDeducer.apply(lease2, multiStreamArgs); + final String shardId1 = getShardIdFromLease(lease1, multiStreamArgs); + final String shardId2 = getShardIdFromLease(lease2, multiStreamArgs); final Shard shard1 = shardIdToShardMap.get(shardId1); final Shard shard2 = shardIdToShardMap.get(shardId2); // If we found shards for the two leases, use comparison of the starting sequence numbers if (shard1 != null && shard2 != null) { - BigInteger sequenceNumber1 = new BigInteger(shard1.sequenceNumberRange().startingSequenceNumber()); - BigInteger sequenceNumber2 = new BigInteger(shard2.sequenceNumberRange().startingSequenceNumber()); + BigInteger sequenceNumber1 = + new BigInteger(shard1.sequenceNumberRange().startingSequenceNumber()); + BigInteger sequenceNumber2 = + new BigInteger(shard2.sequenceNumberRange().startingSequenceNumber()); result = sequenceNumber1.compareTo(sequenceNumber2); } @@ -753,9 +871,12 @@ public class HierarchicalShardSyncer { * @param multiStreamArgs * @return */ - List determineNewLeasesToCreate(List shards, List currentLeases, - InitialPositionInStreamExtended initialPosition, Set inconsistentShardIds, - MultiStreamArgs multiStreamArgs); + List determineNewLeasesToCreate( + List shards, + List currentLeases, + InitialPositionInStreamExtended initialPosition, + Set inconsistentShardIds, + MultiStreamArgs multiStreamArgs); } /** @@ -778,21 +899,29 @@ public class HierarchicalShardSyncer { * @return */ @Override - public List determineNewLeasesToCreate(List shards, List currentLeases, - InitialPositionInStreamExtended initialPosition, Set inconsistentShardIds, MultiStreamArgs multiStreamArgs) { + public List determineNewLeasesToCreate( + List shards, + List currentLeases, + InitialPositionInStreamExtended initialPosition, + Set inconsistentShardIds, + MultiStreamArgs multiStreamArgs) { final String streamIdentifier = Optional.ofNullable(multiStreamArgs.streamIdentifier()) - .map(streamId -> streamId.serialize()).orElse(""); + .map(streamId -> streamId.serialize()) + .orElse(""); final Map shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards); - currentLeases.stream().peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease)) - .map(lease -> shardIdFromLeaseDeducer.apply(lease, multiStreamArgs)) + 
currentLeases.stream() + .peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease)) + .map(lease -> getShardIdFromLease(lease, multiStreamArgs)) .collect(Collectors.toSet()); - final List newLeasesToCreate = getLeasesToCreateForOpenAndClosedShards(initialPosition, shards, multiStreamArgs, streamIdentifier); + final List newLeasesToCreate = + getLeasesToCreateForOpenAndClosedShards(initialPosition, shards, multiStreamArgs, streamIdentifier); - //TODO: Verify before LTR launch that ending sequence number is still returned from the service. + // TODO: Verify before LTR launch that ending sequence number is still returned from the service. final Comparator startingSequenceNumberComparator = - new StartingSequenceNumberAndShardIdBasedComparator(shardIdToShardMapOfAllKinesisShards, multiStreamArgs); + new StartingSequenceNumberAndShardIdBasedComparator( + shardIdToShardMapOfAllKinesisShards, multiStreamArgs); newLeasesToCreate.sort(startingSequenceNumberComparator); return newLeasesToCreate; } @@ -802,14 +931,18 @@ public class HierarchicalShardSyncer { * regardless of if they are open or closed. Closed shards will be unblocked via child shard information upon * reaching SHARD_END. */ - private List getLeasesToCreateForOpenAndClosedShards(InitialPositionInStreamExtended initialPosition, - List shards, MultiStreamArgs multiStreamArgs, String streamId) { + private List getLeasesToCreateForOpenAndClosedShards( + InitialPositionInStreamExtended initialPosition, + List shards, + MultiStreamArgs multiStreamArgs, + String streamId) { final Map shardIdToNewLeaseMap = new HashMap<>(); for (Shard shard : shards) { final String shardId = shard.shardId(); - final Lease lease = multiStreamArgs.isMultiStreamMode() ? - newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier) : newKCLLease(shard); + final Lease lease = multiStreamArgs.isMultiStreamMode() + ? newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier) + : newKCLLease(shard); lease.checkpoint(convertToCheckpoint(initialPosition)); log.debug("{} : Need to create a lease for shard with shardId {}", streamId, shardId); @@ -855,22 +988,20 @@ public class HierarchicalShardSyncer { * * the parent shard has expired. *

    * For example:
+ * <pre>
              * Shard structure (each level depicts a stream segment):
              * 0 1 2 3 4   5   - shards till epoch 102
              * \ / \ / |   |
              *  6   7  4   5   - shards from epoch 103 - 205
              *  \  /   |  / \
              *   8     4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
    -         *
    -         * Current leases: (4, 5, 7)
    -         *
    -         * If initial position is LATEST:
    -         *   - New leases to create: (6)
    -         * If initial position is TRIM_HORIZON:
    -         *   - New leases to create: (0, 1)
    -         * If initial position is AT_TIMESTAMP(epoch=200):
    -         *   - New leases to create: (0, 1)
    -         *
    +         * </pre>
    + * Assuming current leases are (4, 5, 7), new leases to create for an initial position are:
    + * <ul>
    + * <li>LATEST: (6)</li>
    + * <li>TRIM_HORIZON: (0, 1)</li>
    + * <li>AT_TIMESTAMP(epoch=200): (0, 1)</li>
    + * </ul>
    + *
    *
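A sketch of how the traversal documented in the scenario above is driven (illustrative only; the SyncExample wrapper is hypothetical, and the ShardDetector, LeaseRefresher, and MetricsScope collaborators are assumed to be wired up by the lease coordinator as elsewhere in this diff). With TRIM_HORIZON and current leases (4, 5, 7), the sync would create leases for shards 0 and 1:

import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.leases.HierarchicalShardSyncer;
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.leases.ShardDetector;
import software.amazon.kinesis.metrics.MetricsScope;

final class SyncExample {
    static boolean syncFromTrimHorizon(
            final ShardDetector shardDetector,
            final LeaseRefresher leaseRefresher,
            final MetricsScope scope,
            final boolean isLeaseTableEmpty)
            throws Exception {
        final HierarchicalShardSyncer syncer = new HierarchicalShardSyncer();
        final InitialPositionInStreamExtended position =
                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON);
        // ignoreUnexpectedChildShards = false: fail fast on inconsistent parents.
        return syncer.checkAndCreateLeaseForNewShards(
                shardDetector, leaseRefresher, position, scope, false, isLeaseTableEmpty);
    }
}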

    * The leases returned are sorted by the starting sequence number - following the same order * when persisting the leases in DynamoDB will ensure that we recover gracefully if we fail @@ -884,16 +1015,20 @@ public class HierarchicalShardSyncer { * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ @Override - public synchronized List determineNewLeasesToCreate(final List shards, final List currentLeases, - final InitialPositionInStreamExtended initialPosition, final Set inconsistentShardIds, + public synchronized List determineNewLeasesToCreate( + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition, + final Set inconsistentShardIds, final MultiStreamArgs multiStreamArgs) { final Map shardIdToNewLeaseMap = new HashMap<>(); final Map shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards); final String streamIdentifier = Optional.ofNullable(multiStreamArgs.streamIdentifier()) - .map(streamId -> streamId.serialize()).orElse(""); + .map(streamId -> streamId.serialize()) + .orElse(""); final Set shardIdsOfCurrentLeases = currentLeases.stream() .peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease)) - .map(lease -> shardIdFromLeaseDeducer.apply(lease, multiStreamArgs)) + .map(lease -> getShardIdFromLease(lease, multiStreamArgs)) .collect(Collectors.toSet()); final List openShards = getOpenShards(shards, streamIdentifier); @@ -904,9 +1039,15 @@ public class HierarchicalShardSyncer { final String shardId = shard.shardId(); log.debug("{} : Evaluating leases for open shard {} and its ancestors.", streamIdentifier, shardId); if (shardIdsOfCurrentLeases.contains(shardId)) { - log.debug("{} : Lease for shardId {} already exists. Not creating a lease", streamIdentifier, shardId); + log.debug( + "{} : Lease for shardId {} already exists. Not creating a lease", + streamIdentifier, + shardId); } else if (inconsistentShardIds.contains(shardId)) { - log.info("{} : shardId {} is an inconsistent child. Not creating a lease", streamIdentifier, shardId); + log.info( + "{} : shardId {} is an inconsistent child. Not creating a lease", + streamIdentifier, + shardId); } else { log.debug("{} : Beginning traversal of ancestry tree for shardId {}", streamIdentifier, shardId); @@ -914,9 +1055,14 @@ public class HierarchicalShardSyncer { // We will create leases for only one level in the ancestry tree. Once we find the first ancestor // that needs to be processed in order to complete the hash range, we will not create leases for // further descendants of that ancestor. - final boolean isDescendant = checkIfDescendantAndAddNewLeasesForAncestors(shardId, initialPosition, - shardIdsOfCurrentLeases, shardIdToShardMapOfAllKinesisShards, shardIdToNewLeaseMap, - memoizationContext, multiStreamArgs); + final boolean isDescendant = checkIfDescendantAndAddNewLeasesForAncestors( + shardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToNewLeaseMap, + memoizationContext, + multiStreamArgs); // If shard is a descendant, the leases for its ancestors were already created above. Open shards // that are NOT descendants will not have leases yet, so we create them here. We will not create @@ -924,22 +1070,30 @@ public class HierarchicalShardSyncer { // SHARD_END of their parents. if (!isDescendant) { log.debug("{} : shardId {} has no ancestors. 
Creating a lease.", streamIdentifier, shardId); - final Lease newLease = multiStreamArgs.isMultiStreamMode() ? - newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier()) : - newKCLLease(shard); + final Lease newLease = multiStreamArgs.isMultiStreamMode() + ? newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier()) + : newKCLLease(shard); newLease.checkpoint(convertToCheckpoint(initialPosition)); - log.debug("{} : Set checkpoint of {} to {}", streamIdentifier, newLease.leaseKey(), newLease.checkpoint()); + log.debug( + "{} : Set checkpoint of {} to {}", + streamIdentifier, + newLease.leaseKey(), + newLease.checkpoint()); shardIdToNewLeaseMap.put(shardId, newLease); } else { - log.debug("{} : shardId {} is a descendant whose ancestors should already have leases. " + - "Not creating a lease.", streamIdentifier, shardId); + log.debug( + "{} : shardId {} is a descendant whose ancestors should already have leases. " + + "Not creating a lease.", + streamIdentifier, + shardId); } } } final List newLeasesToCreate = new ArrayList<>(shardIdToNewLeaseMap.values()); - final Comparator startingSequenceNumberComparator = new StartingSequenceNumberAndShardIdBasedComparator( - shardIdToShardMapOfAllKinesisShards, multiStreamArgs); + final Comparator startingSequenceNumberComparator = + new StartingSequenceNumberAndShardIdBasedComparator( + shardIdToShardMapOfAllKinesisShards, multiStreamArgs); newLeasesToCreate.sort(startingSequenceNumberComparator); return newLeasesToCreate; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java index 0c3de1bd..d128fc95 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java @@ -27,6 +27,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; + import lombok.AccessLevel; import lombok.Getter; import lombok.NonNull; @@ -82,8 +83,11 @@ public class KinesisShardDetector implements ShardDetector { @NonNull private final KinesisAsyncClient kinesisClient; - @NonNull @Getter + + @NonNull + @Getter private final StreamIdentifier streamIdentifier; + private final long listShardsBackoffTimeInMillis; private final int maxListShardsRetryAttempts; private final long listShardsCacheAllowedAgeInSeconds; @@ -93,21 +97,41 @@ public class KinesisShardDetector implements ShardDetector { private volatile Map cachedShardMap = null; private volatile Instant lastCacheUpdateTime; + @Getter(AccessLevel.PACKAGE) private final AtomicInteger cacheMisses = new AtomicInteger(0); + private static final Boolean THROW_RESOURCE_NOT_FOUND_EXCEPTION = true; + @Deprecated - public KinesisShardDetector(KinesisAsyncClient kinesisClient, String streamName, long listShardsBackoffTimeInMillis, - int maxListShardsRetryAttempts, long listShardsCacheAllowedAgeInSeconds, int maxCacheMissesBeforeReload, + public KinesisShardDetector( + KinesisAsyncClient kinesisClient, + String streamName, + long listShardsBackoffTimeInMillis, + int maxListShardsRetryAttempts, + long listShardsCacheAllowedAgeInSeconds, + int maxCacheMissesBeforeReload, int cacheMissWarningModulus) { - this(kinesisClient, StreamIdentifier.singleStreamInstance(streamName), listShardsBackoffTimeInMillis, 
maxListShardsRetryAttempts, - listShardsCacheAllowedAgeInSeconds, maxCacheMissesBeforeReload, cacheMissWarningModulus, + this( + kinesisClient, + StreamIdentifier.singleStreamInstance(streamName), + listShardsBackoffTimeInMillis, + maxListShardsRetryAttempts, + listShardsCacheAllowedAgeInSeconds, + maxCacheMissesBeforeReload, + cacheMissWarningModulus, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } - public KinesisShardDetector(KinesisAsyncClient kinesisClient, StreamIdentifier streamIdentifier, long listShardsBackoffTimeInMillis, - int maxListShardsRetryAttempts, long listShardsCacheAllowedAgeInSeconds, int maxCacheMissesBeforeReload, - int cacheMissWarningModulus, Duration kinesisRequestTimeout) { + public KinesisShardDetector( + KinesisAsyncClient kinesisClient, + StreamIdentifier streamIdentifier, + long listShardsBackoffTimeInMillis, + int maxListShardsRetryAttempts, + long listShardsCacheAllowedAgeInSeconds, + int maxCacheMissesBeforeReload, + int cacheMissWarningModulus, + Duration kinesisRequestTimeout) { this.kinesisClient = kinesisClient; this.streamIdentifier = streamIdentifier; this.listShardsBackoffTimeInMillis = listShardsBackoffTimeInMillis; @@ -141,8 +165,10 @@ public class KinesisShardDetector implements ShardDetector { shard = cachedShardMap.get(shardId); if (shard == null) { - log.warn("Even after cache refresh shard '{}' wasn't found. This could indicate a bigger" - + " problem.", shardId); + log.warn( + "Even after cache refresh shard '{}' wasn't found. This could indicate a bigger" + + " problem.", + shardId); } cacheMisses.set(0); @@ -157,8 +183,8 @@ public class KinesisShardDetector implements ShardDetector { } if (shard == null) { - final String message = String.format("Cannot find the shard given the shardId %s. Cache misses: %s", - shardId, cacheMisses); + final String message = + String.format("Cannot find the shard given the shardId %s. Cache misses: %s", shardId, cacheMisses); if (cacheMisses.get() % cacheMissWarningModulus == 0) { log.warn(message); } else { @@ -175,15 +201,26 @@ public class KinesisShardDetector implements ShardDetector { return listShardsWithFilter(null); } + @Override + @Synchronized + public List listShardsWithoutConsumingResourceNotFoundException() { + return listShardsWithFilterInternal(null, THROW_RESOURCE_NOT_FOUND_EXCEPTION); + } + @Override @Synchronized public List listShardsWithFilter(ShardFilter shardFilter) { + return listShardsWithFilterInternal(shardFilter, !THROW_RESOURCE_NOT_FOUND_EXCEPTION); + } + + private List listShardsWithFilterInternal( + ShardFilter shardFilter, boolean shouldPropagateResourceNotFoundException) { final List shards = new ArrayList<>(); ListShardsResponse result; String nextToken = null; do { - result = listShards(shardFilter, nextToken); + result = listShards(shardFilter, nextToken, shouldPropagateResourceNotFoundException); if (result == null) { /* @@ -201,14 +238,19 @@ public class KinesisShardDetector implements ShardDetector { return shards; } - private ListShardsResponse listShards(ShardFilter shardFilter, final String nextToken) { + /** + * @param shouldPropagateResourceNotFoundException : used to determine if ResourceNotFoundException should be + * handled by method and return Empty list or propagate the exception. 
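The new listShardsWithoutConsumingResourceNotFoundException variant introduced here lets a caller distinguish a deleted stream from an empty shard list, which listShardsWithFilter deliberately hides. A sketch of a caller relying on the propagated exception (illustrative only; DeletedStreamAwareListing, shardsOrEmptyIfDeleted, and onStreamDeleted are hypothetical):

import java.util.Collections;
import java.util.List;

import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.services.kinesis.model.Shard;
import software.amazon.kinesis.leases.ShardDetector;

final class DeletedStreamAwareListing {
    static List<Shard> shardsOrEmptyIfDeleted(final ShardDetector detector, final Runnable onStreamDeleted) {
        try {
            return detector.listShardsWithoutConsumingResourceNotFoundException();
        } catch (ResourceNotFoundException e) {
            onStreamDeleted.run(); // e.g. queue the stream for lease cleanup
            return Collections.emptyList();
        }
    }
}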
+ */ + private ListShardsResponse listShards( + ShardFilter shardFilter, final String nextToken, final boolean shouldPropagateResourceNotFoundException) { ListShardsRequest.Builder builder = KinesisRequestsBuilder.listShardsRequestBuilder(); if (StringUtils.isEmpty(nextToken)) { - builder = builder.streamName(streamIdentifier.streamName()).shardFilter(shardFilter); + builder.streamName(streamIdentifier.streamName()).shardFilter(shardFilter); + streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString())); } else { - builder = builder.nextToken(nextToken); + builder.nextToken(nextToken); } - final ListShardsRequest request = builder.build(); log.info("Stream {}: listing shards with list shards request {}", streamIdentifier, request); @@ -232,7 +274,9 @@ public class KinesisShardDetector implements ShardDetector { + " Active or Updating)"); return null; } catch (LimitExceededException e) { - log.info("Got LimitExceededException when listing shards {}. Backing off for {} millis.", streamIdentifier, + log.info( + "Got LimitExceededException when listing shards {}. Backing off for {} millis.", + streamIdentifier, listShardsBackoffTimeInMillis); try { Thread.sleep(listShardsBackoffTimeInMillis); @@ -241,11 +285,17 @@ public class KinesisShardDetector implements ShardDetector { } lastException = e; } catch (ResourceNotFoundException e) { - log.warn("Got ResourceNotFoundException when fetching shard list for {}. Stream no longer exists.", + log.warn( + "Got ResourceNotFoundException when fetching shard list for {}. Stream no longer exists.", streamIdentifier.streamName()); - return ListShardsResponse.builder().shards(Collections.emptyList()) + if (shouldPropagateResourceNotFoundException) { + throw e; + } + return ListShardsResponse.builder() + .shards(Collections.emptyList()) .nextToken(null) .build(); + } catch (TimeoutException te) { throw new RuntimeException(te); } @@ -278,28 +328,30 @@ public class KinesisShardDetector implements ShardDetector { } @Override - public ListShardsResponse getListShardsResponse(ListShardsRequest request) throws - ExecutionException, TimeoutException, InterruptedException { + public ListShardsResponse getListShardsResponse(ListShardsRequest request) + throws ExecutionException, TimeoutException, InterruptedException { return FutureUtils.resolveOrCancelFuture(kinesisClient.listShards(request), kinesisRequestTimeout); } @Override - public List getChildShards(final String shardId) throws InterruptedException, ExecutionException, TimeoutException { - final GetShardIteratorRequest getShardIteratorRequest = KinesisRequestsBuilder.getShardIteratorRequestBuilder() - .streamName(streamIdentifier.streamName()) - .shardIteratorType(ShardIteratorType.LATEST) - .shardId(shardId) - .build(); + public List getChildShards(final String shardId) + throws InterruptedException, ExecutionException, TimeoutException { + final GetShardIteratorRequest.Builder getShardIteratorRequestBuilder = + KinesisRequestsBuilder.getShardIteratorRequestBuilder() + .streamName(streamIdentifier.streamName()) + .shardIteratorType(ShardIteratorType.LATEST) + .shardId(shardId); + streamIdentifier.streamArnOptional().ifPresent(arn -> getShardIteratorRequestBuilder.streamARN(arn.toString())); - final GetShardIteratorResponse getShardIteratorResponse = - FutureUtils.resolveOrCancelFuture(kinesisClient.getShardIterator(getShardIteratorRequest), kinesisRequestTimeout); + final GetShardIteratorResponse getShardIteratorResponse = FutureUtils.resolveOrCancelFuture( + 
kinesisClient.getShardIterator(getShardIteratorRequestBuilder.build()), kinesisRequestTimeout); - final GetRecordsRequest getRecordsRequest = KinesisRequestsBuilder.getRecordsRequestBuilder() - .shardIterator(getShardIteratorResponse.shardIterator()) - .build(); + final GetRecordsRequest.Builder getRecordsRequestBuilder = KinesisRequestsBuilder.getRecordsRequestBuilder() + .shardIterator(getShardIteratorResponse.shardIterator()); + streamIdentifier.streamArnOptional().ifPresent(arn -> getRecordsRequestBuilder.streamARN(arn.toString())); - final GetRecordsResponse getRecordsResponse = - FutureUtils.resolveOrCancelFuture(kinesisClient.getRecords(getRecordsRequest), kinesisRequestTimeout); + final GetRecordsResponse getRecordsResponse = FutureUtils.resolveOrCancelFuture( + kinesisClient.getRecords(getRecordsRequestBuilder.build()), kinesisRequestTimeout); return getRecordsResponse.childShards(); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java index f761a9a7..01735f9c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java @@ -19,6 +19,7 @@ import java.util.HashSet; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; + import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NoArgsConstructor; @@ -39,12 +40,19 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; @NoArgsConstructor @Getter @Accessors(fluent = true) -@EqualsAndHashCode(exclude = {"concurrencyToken", "lastCounterIncrementNanos", "childShardIds", "pendingCheckpointState", "isMarkedForLeaseSteal"}) +@EqualsAndHashCode( + exclude = { + "concurrencyToken", + "lastCounterIncrementNanos", + "childShardIds", + "pendingCheckpointState", + "isMarkedForLeaseSteal" + }) @ToString public class Lease { /** * See javadoc for System.nanoTime - summary: - * + * * Sometimes System.nanoTime's return values will wrap due to overflow. When they do, the difference between two * values will be very large. We will consider leases to be expired if they are more than a year old. */ @@ -100,36 +108,71 @@ public class Lease { * Count of distinct lease holders between checkpoints. */ private Long ownerSwitchesSinceCheckpoint = 0L; + private final Set parentShardIds = new HashSet<>(); private final Set childShardIds = new HashSet<>(); private HashKeyRangeForLease hashKeyRangeForLease; /** * Copy constructor, used by clone(). 
- * + * * @param lease lease to copy */ protected Lease(Lease lease) { - this(lease.leaseKey(), lease.leaseOwner(), lease.leaseCounter(), lease.concurrencyToken(), - lease.lastCounterIncrementNanos(), lease.checkpoint(), lease.pendingCheckpoint(), - lease.ownerSwitchesSinceCheckpoint(), lease.parentShardIds(), lease.childShardIds(), - lease.pendingCheckpointState(), lease.hashKeyRangeForLease()); + this( + lease.leaseKey(), + lease.leaseOwner(), + lease.leaseCounter(), + lease.concurrencyToken(), + lease.lastCounterIncrementNanos(), + lease.checkpoint(), + lease.pendingCheckpoint(), + lease.ownerSwitchesSinceCheckpoint(), + lease.parentShardIds(), + lease.childShardIds(), + lease.pendingCheckpointState(), + lease.hashKeyRangeForLease()); } @Deprecated - public Lease(final String leaseKey, final String leaseOwner, final Long leaseCounter, - final UUID concurrencyToken, final Long lastCounterIncrementNanos, - final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, - final Long ownerSwitchesSinceCheckpoint, final Set parentShardIds) { - this(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos, checkpoint, pendingCheckpoint, - ownerSwitchesSinceCheckpoint, parentShardIds, new HashSet<>(), null, null); + public Lease( + final String leaseKey, + final String leaseOwner, + final Long leaseCounter, + final UUID concurrencyToken, + final Long lastCounterIncrementNanos, + final ExtendedSequenceNumber checkpoint, + final ExtendedSequenceNumber pendingCheckpoint, + final Long ownerSwitchesSinceCheckpoint, + final Set parentShardIds) { + this( + leaseKey, + leaseOwner, + leaseCounter, + concurrencyToken, + lastCounterIncrementNanos, + checkpoint, + pendingCheckpoint, + ownerSwitchesSinceCheckpoint, + parentShardIds, + new HashSet<>(), + null, + null); } - public Lease(final String leaseKey, final String leaseOwner, final Long leaseCounter, - final UUID concurrencyToken, final Long lastCounterIncrementNanos, - final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, - final Long ownerSwitchesSinceCheckpoint, final Set parentShardIds, final Set childShardIds, - final byte[] pendingCheckpointState, final HashKeyRangeForLease hashKeyRangeForLease) { + public Lease( + final String leaseKey, + final String leaseOwner, + final Long leaseCounter, + final UUID concurrencyToken, + final Long lastCounterIncrementNanos, + final ExtendedSequenceNumber checkpoint, + final ExtendedSequenceNumber pendingCheckpoint, + final Long ownerSwitchesSinceCheckpoint, + final Set parentShardIds, + final Set childShardIds, + final byte[] pendingCheckpointState, + final HashKeyRangeForLease hashKeyRangeForLease) { this.leaseKey = leaseKey; this.leaseOwner = leaseOwner; this.leaseCounter = leaseCounter; @@ -159,7 +202,7 @@ public class Lease { /** * Updates this Lease's mutable, application-specific fields based on the passed-in lease object. Does not update * fields that are internal to the leasing library (leaseKey, leaseOwner, leaseCounter). 
- * + * * @param lease */ public void update(final Lease lease) { @@ -171,6 +214,15 @@ public class Lease { childShardIds(lease.childShardIds); } + /** + * @param leaseDurationNanos duration of lease in nanoseconds + * @param asOfNanos time in nanoseconds to check expiration as-of + * @return true if the lease is ready to be taken + */ + public boolean isAvailable(long leaseDurationNanos, long asOfNanos) { + return isUnassigned() || isExpired(leaseDurationNanos, asOfNanos); + } + /** * @param leaseDurationNanos duration of lease in nanoseconds * @param asOfNanos time in nanoseconds to check expiration as-of @@ -190,9 +242,16 @@ public class Lease { } } + /** + * @return true if lease is not currently owned + */ + private boolean isUnassigned() { + return leaseOwner == null; + } + /** * Sets lastCounterIncrementNanos - * + * * @param lastCounterIncrementNanos last renewal in nanoseconds since the epoch */ public void lastCounterIncrementNanos(Long lastCounterIncrementNanos) { @@ -201,7 +260,7 @@ /** * Sets concurrencyToken. - * + * * @param concurrencyToken may not be null */ public void concurrencyToken(@NonNull final UUID concurrencyToken) { @@ -210,7 +269,7 @@ /** * Sets leaseKey. LeaseKey is immutable once set. - * + * * @param leaseKey may not be null. */ public void leaseKey(@NonNull final String leaseKey) { @@ -222,7 +281,7 @@ /** * Sets leaseCounter. - * + * * @param leaseCounter may not be null */ public void leaseCounter(@NonNull final Long leaseCounter) { @@ -298,7 +357,7 @@ /** * Sets leaseOwner. - * + * * @param leaseOwner may be null. */ public void leaseOwner(String leaseOwner) { @@ -307,11 +366,10 @@ /** * Returns a deep copy of this object. Type-unsafe - there aren't good mechanisms for copy-constructing generics. - * + * * @return A deep copy of this object. 
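The new isAvailable/isUnassigned pair above lets callers ask one question before trying to take a lease. A minimal usage sketch follows; the AvailableLeaseFilter class and the failoverTimeMillis-derived duration are illustrative assumptions, not part of this change.

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import software.amazon.kinesis.leases.Lease;

class AvailableLeaseFilter {
    // Hypothetical helper: keep only leases a worker could try to take,
    // i.e. leases that are unassigned or whose owner stopped renewing them.
    static List<Lease> availableLeases(List<Lease> allLeases, long failoverTimeMillis) {
        final long leaseDurationNanos = TimeUnit.MILLISECONDS.toNanos(failoverTimeMillis);
        final long asOfNanos = System.nanoTime();
        return allLeases.stream()
                .filter(lease -> lease.isAvailable(leaseDurationNanos, asOfNanos))
                .collect(Collectors.toList());
    }
}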
*/ public Lease copy() { return new Lease(this); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java index f9e52e1c..739732d2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java @@ -15,6 +15,18 @@ package software.amazon.kinesis.leases; +import java.util.HashSet; +import java.util.Objects; +import java.util.Optional; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Stopwatch; import lombok.EqualsAndHashCode; @@ -28,40 +40,31 @@ import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.InvalidStateException; +import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.retrieval.AWSExceptionManager; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.util.HashSet; -import java.util.Objects; -import java.util.Optional; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; - /** * Helper class to clean up any expired/closed shard leases. It will clean up leases periodically as defined by * {@link LeaseManagementConfig#leaseCleanupConfig()} asynchronously. 
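The cadence mentioned in that javadoc comes from LeaseCleanupConfig. Below is a hedged sketch that mirrors the DEFAULT_LEASE_CLEANUP_CONFIG values this diff wires into LeaseManagementConfig; the LeaseCleanupConfig import is elided because its package is not shown in this diff, and garbageLeaseCleanupIntervalMillis is inferred from the matching DEFAULT_ constant.

import java.time.Duration;

class LeaseCleanupConfigSketch {
    // Rebuilds the defaults from LeaseManagementConfig.DEFAULT_LEASE_CLEANUP_CONFIG.
    // NOTE: LeaseCleanupConfig's import is omitted; resolve it from the KCL leases package.
    static LeaseCleanupConfig defaults() {
        return LeaseCleanupConfig.builder()
                // how often the cleanup thread wakes up
                .leaseCleanupIntervalMillis(Duration.ofMinutes(1).toMillis())
                // how often SHARD_END (completed shard) leases are re-checked
                .completedLeaseCleanupIntervalMillis(Duration.ofMinutes(5).toMillis())
                // how often garbage leases (shards gone from the stream) are re-checked
                .garbageLeaseCleanupIntervalMillis(Duration.ofMinutes(30).toMillis())
                .build();
    }
}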
*/ -@Accessors(fluent=true) +@Accessors(fluent = true) @Slf4j @RequiredArgsConstructor @EqualsAndHashCode public class LeaseCleanupManager { @NonNull private final LeaseCoordinator leaseCoordinator; + @NonNull private final MetricsFactory metricsFactory; + @NonNull private final ScheduledExecutorService deletionThreadPool; + private final boolean cleanupLeasesUponShardCompletion; private final long leaseCleanupIntervalMillis; private final long completedLeaseCleanupIntervalMillis; @@ -85,8 +88,8 @@ public class LeaseCleanupManager { log.info("Starting lease cleanup thread."); completedLeaseStopwatch.reset().start(); garbageLeaseStopwatch.reset().start(); - deletionThreadPool.scheduleAtFixedRate(new LeaseCleanupThread(), INITIAL_DELAY, leaseCleanupIntervalMillis, - TimeUnit.MILLISECONDS); + deletionThreadPool.scheduleAtFixedRate( + new LeaseCleanupThread(), INITIAL_DELAY, leaseCleanupIntervalMillis, TimeUnit.MILLISECONDS); isRunning = true; } else { log.info("Lease cleanup thread already running, no need to start."); @@ -117,8 +120,10 @@ public class LeaseCleanupManager { public void enqueueForDeletion(LeasePendingDeletion leasePendingDeletion) { final Lease lease = leasePendingDeletion.lease(); if (lease == null) { - log.warn("Cannot enqueue lease {} for deferred deletion - instance doesn't hold the lease for that shard.", - lease.leaseKey()); + log.warn( + "Cannot enqueue {} for {} as instance doesn't hold the lease for that shard.", + leasePendingDeletion.shardInfo(), + leasePendingDeletion.streamIdentifier()); } else { log.debug("Enqueuing lease {} for deferred deletion.", lease.leaseKey()); if (!deletionQueue.add(leasePendingDeletion)) { @@ -161,9 +166,12 @@ public class LeaseCleanupManager { return garbageLeaseStopwatch.elapsed(TimeUnit.MILLISECONDS) >= garbageLeaseCleanupIntervalMillis; } - public LeaseCleanupResult cleanupLease(LeasePendingDeletion leasePendingDeletion, - boolean timeToCheckForCompletedShard, boolean timeToCheckForGarbageShard) throws TimeoutException, - InterruptedException, DependencyException, ProvisionedThroughputException, InvalidStateException { + public LeaseCleanupResult cleanupLease( + LeasePendingDeletion leasePendingDeletion, + boolean timeToCheckForCompletedShard, + boolean timeToCheckForGarbageShard) + throws TimeoutException, InterruptedException, DependencyException, ProvisionedThroughputException, + InvalidStateException { final Lease lease = leasePendingDeletion.lease(); final ShardInfo shardInfo = leasePendingDeletion.shardInfo(); final StreamIdentifier streamIdentifier = leasePendingDeletion.streamIdentifier(); @@ -179,7 +187,7 @@ public class LeaseCleanupManager { try { if (cleanupLeasesUponShardCompletion && timeToCheckForCompletedShard) { final Lease leaseFromDDB = leaseCoordinator.leaseRefresher().getLease(lease.leaseKey()); - if(leaseFromDDB != null) { + if (leaseFromDDB != null) { Set childShardKeys = leaseFromDDB.childShardIds(); if (CollectionUtils.isNullOrEmpty(childShardKeys)) { try { @@ -188,7 +196,8 @@ public class LeaseCleanupManager { if (CollectionUtils.isNullOrEmpty(childShardKeys)) { log.error( "No child shards returned from service for shard {} for {} while cleaning up lease.", - shardInfo.shardId(), streamIdentifier.streamName()); + shardInfo.shardId(), + streamIdentifier.streamName()); } else { wereChildShardsPresent = true; updateLeaseWithChildShards(leasePendingDeletion, childShardKeys); @@ -205,18 +214,25 @@ public class LeaseCleanupManager { cleanedUpCompletedLease = cleanupLeaseForCompletedShard(lease, shardInfo, 
childShardKeys); } catch (Exception e) { // Suppressing the exception here, so that we can attempt garbage cleanup. - log.warn("Unable to cleanup lease for shard {} in {}", shardInfo.shardId(), streamIdentifier.streamName(), e); + log.warn( + "Unable to cleanup lease for shard {} in {}", + shardInfo.shardId(), + streamIdentifier.streamName(), + e); } } else { - log.info("Lease not present in lease table while cleaning the shard {} of {}", shardInfo.shardId(), streamIdentifier.streamName()); + log.info( + "Lease not present in lease table while cleaning the shard {} of {}", + shardInfo.shardId(), + streamIdentifier.streamName()); cleanedUpCompletedLease = true; } } if (!alreadyCheckedForGarbageCollection && timeToCheckForGarbageShard) { try { - wereChildShardsPresent = !CollectionUtils - .isNullOrEmpty(leasePendingDeletion.getChildShardsFromService()); + wereChildShardsPresent = + !CollectionUtils.isNullOrEmpty(leasePendingDeletion.getChildShardsFromService()); } catch (ExecutionException e) { throw exceptionManager.apply(e.getCause()); } @@ -226,20 +242,23 @@ cleanedUpGarbageLease = cleanupLeaseForGarbageShard(lease, e); } - return new LeaseCleanupResult(cleanedUpCompletedLease, cleanedUpGarbageLease, wereChildShardsPresent, - wasResourceNotFound); + return new LeaseCleanupResult( + cleanedUpCompletedLease, cleanedUpGarbageLease, wereChildShardsPresent, wasResourceNotFound); } // A lease that ended with SHARD_END from ResourceNotFoundException is safe to delete if it no longer exists in the // stream (known explicitly from ResourceNotFound being thrown when processing this shard), - private boolean cleanupLeaseForGarbageShard(Lease lease, Throwable e) throws DependencyException, ProvisionedThroughputException, InvalidStateException { + private boolean cleanupLeaseForGarbageShard(Lease lease, Throwable e) + throws DependencyException, ProvisionedThroughputException, InvalidStateException { log.warn("Deleting lease {} as it is not present in the stream.", lease, e); leaseCoordinator.leaseRefresher().deleteLease(lease); return true; } /** - * Check if the all of the parent shards for a given lease have an ongoing lease. If any one parent still has a lease, return false. Otherwise return true + * Check if all of the parent shards for a given lease have an ongoing lease. If any one parent still has a + * lease, return false. 
Otherwise return true + * * @param lease * @param shardInfo * @return @@ -247,13 +266,18 @@ public class LeaseCleanupManager { * @throws ProvisionedThroughputException * @throws InvalidStateException */ - private boolean allParentShardLeasesDeleted(Lease lease, ShardInfo shardInfo) throws DependencyException, ProvisionedThroughputException, InvalidStateException { + private boolean allParentShardLeasesDeleted(Lease lease, ShardInfo shardInfo) + throws DependencyException, ProvisionedThroughputException, InvalidStateException { for (String parentShard : lease.parentShardIds()) { - final Lease parentLease = leaseCoordinator.leaseRefresher().getLease(ShardInfo.getLeaseKey(shardInfo, parentShard)); + final Lease parentLease = + leaseCoordinator.leaseRefresher().getLease(ShardInfo.getLeaseKey(shardInfo, parentShard)); if (parentLease != null) { - log.warn("Lease {} has a parent lease {} which is still present in the lease table, skipping deletion " + - "for this lease.", lease, parentLease); + log.warn( + "Lease {} has a parent lease {} which is still present in the lease table, skipping deletion " + + "for this lease.", + lease, + parentLease); return false; } } @@ -266,27 +290,29 @@ public class LeaseCleanupManager { private boolean cleanupLeaseForCompletedShard(Lease lease, ShardInfo shardInfo, Set childShardKeys) throws DependencyException, ProvisionedThroughputException, InvalidStateException, IllegalStateException { final Set processedChildShardLeaseKeys = new HashSet<>(); - final Set childShardLeaseKeys = childShardKeys.stream().map(ck -> ShardInfo.getLeaseKey(shardInfo, ck)) + final Set childShardLeaseKeys = childShardKeys.stream() + .map(ck -> ShardInfo.getLeaseKey(shardInfo, ck)) .collect(Collectors.toSet()); for (String childShardLeaseKey : childShardLeaseKeys) { final Lease childShardLease = Optional.ofNullable( - leaseCoordinator.leaseRefresher().getLease(childShardLeaseKey)) - .orElseThrow(() -> new IllegalStateException( - "Child lease " + childShardLeaseKey + " for completed shard not found in " - + "lease table - not cleaning up lease " + lease)); + leaseCoordinator.leaseRefresher().getLease(childShardLeaseKey)) + .orElseThrow(() -> new IllegalStateException("Child lease " + childShardLeaseKey + + " for completed shard not found in " + "lease table - not cleaning up lease " + lease)); - if (!childShardLease.checkpoint().equals(ExtendedSequenceNumber.TRIM_HORIZON) && !childShardLease - .checkpoint().equals(ExtendedSequenceNumber.AT_TIMESTAMP)) { + if (!childShardLease.checkpoint().equals(ExtendedSequenceNumber.TRIM_HORIZON) + && !childShardLease.checkpoint().equals(ExtendedSequenceNumber.AT_TIMESTAMP)) { processedChildShardLeaseKeys.add(childShardLease.leaseKey()); } } - if (!allParentShardLeasesDeleted(lease, shardInfo) || !Objects.equals(childShardLeaseKeys, processedChildShardLeaseKeys)) { + if (!allParentShardLeasesDeleted(lease, shardInfo) + || !Objects.equals(childShardLeaseKeys, processedChildShardLeaseKeys)) { return false; } - log.info("Deleting lease {} as it has been completely processed and processing of child shard(s) has begun.", + log.info( + "Deleting lease {} as it has been completely processed and processing of child shard(s) has begun.", lease); leaseCoordinator.leaseRefresher().deleteLease(lease); @@ -326,23 +352,36 @@ public class LeaseCleanupManager { final StreamIdentifier streamIdentifier = leasePendingDeletion.streamIdentifier(); boolean deletionSucceeded = false; try { - final LeaseCleanupResult leaseCleanupResult = cleanupLease(leasePendingDeletion, - 
timeToCheckForCompletedShard(), timeToCheckForGarbageShard()); + final LeaseCleanupResult leaseCleanupResult = cleanupLease( + leasePendingDeletion, timeToCheckForCompletedShard(), timeToCheckForGarbageShard()); completedLeaseCleanedUp |= leaseCleanupResult.cleanedUpCompletedLease(); garbageLeaseCleanedUp |= leaseCleanupResult.cleanedUpGarbageLease(); if (leaseCleanupResult.leaseCleanedUp()) { - log.info("Successfully cleaned up lease {} for {} due to {}", leaseKey, streamIdentifier, leaseCleanupResult); + log.info( + "Successfully cleaned up lease {} for {} due to {}", + leaseKey, + streamIdentifier, + leaseCleanupResult); deletionSucceeded = true; } else { - log.warn("Unable to clean up lease {} for {} due to {}", leaseKey, streamIdentifier, leaseCleanupResult); + log.warn( + "Unable to clean up lease {} for {} due to {}", + leaseKey, + streamIdentifier, + leaseCleanupResult); } } catch (Exception e) { - log.error("Failed to cleanup lease {} for {}. Will re-enqueue for deletion and retry on next " + - "scheduled execution.", leaseKey, streamIdentifier, e); + log.error( + "Failed to cleanup lease {} for {}. Will re-enqueue for deletion and retry on next " + + "scheduled execution.", + leaseKey, + streamIdentifier, + e); } if (!deletionSucceeded) { - log.debug("Did not cleanup lease {} for {}. Re-enqueueing for deletion.", leaseKey, streamIdentifier); + log.debug( + "Did not cleanup lease {} for {}. Re-enqueueing for deletion.", leaseKey, streamIdentifier); failedDeletions.add(leasePendingDeletion); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java index 6437f339..acc08dab 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java @@ -136,7 +136,7 @@ public interface LeaseCoordinator { * @return all leases for the application that are in the lease table */ default List allLeases() { - return Collections.emptyList(); + return Collections.emptyList(); } /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java index 4f2d3a2b..2d4e041c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java @@ -15,21 +15,24 @@ package software.amazon.kinesis.leases; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - import java.time.Duration; +import java.util.Collection; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Function; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; import lombok.Data; import lombok.NonNull; import lombok.experimental.Accessors; import org.apache.commons.lang3.Validate; +import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.BillingMode; +import software.amazon.awssdk.services.dynamodb.model.Tag; import 
software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; @@ -49,13 +52,18 @@ public class LeaseManagementConfig { public static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofMinutes(1); - public static final long DEFAULT_LEASE_CLEANUP_INTERVAL_MILLIS = Duration.ofMinutes(1).toMillis(); - public static final long DEFAULT_COMPLETED_LEASE_CLEANUP_INTERVAL_MILLIS = Duration.ofMinutes(5).toMillis(); - public static final long DEFAULT_GARBAGE_LEASE_CLEANUP_INTERVAL_MILLIS = Duration.ofMinutes(30).toMillis(); + public static final long DEFAULT_LEASE_CLEANUP_INTERVAL_MILLIS = + Duration.ofMinutes(1).toMillis(); + public static final long DEFAULT_COMPLETED_LEASE_CLEANUP_INTERVAL_MILLIS = + Duration.ofMinutes(5).toMillis(); + public static final long DEFAULT_GARBAGE_LEASE_CLEANUP_INTERVAL_MILLIS = + Duration.ofMinutes(30).toMillis(); public static final long DEFAULT_PERIODIC_SHARD_SYNC_INTERVAL_MILLIS = 2 * 60 * 1000L; + public static final boolean DEFAULT_LEASE_TABLE_DELETION_PROTECTION_ENABLED = false; + public static final boolean DEFAULT_LEASE_TABLE_PITR_ENABLED = false; + public static final boolean DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT = true; public static final int DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY = 3; - public static final LeaseCleanupConfig DEFAULT_LEASE_CLEANUP_CONFIG = LeaseCleanupConfig.builder() .leaseCleanupIntervalMillis(DEFAULT_LEASE_CLEANUP_INTERVAL_MILLIS) .completedLeaseCleanupIntervalMillis(DEFAULT_COMPLETED_LEASE_CLEANUP_INTERVAL_MILLIS) @@ -98,6 +106,17 @@ */ private long failoverTimeMillis = 10000L; + /** + * Whether workers should take very expired leases with priority. A lease is very expired when its worker has not + * renewed it within 3 * {@link LeaseManagementConfig#failoverTimeMillis}. Very expired leases are taken with + * priority, disregarding the worker's target lease count but still obeying + * {@link LeaseManagementConfig#maxLeasesForWorker}. New leases for new shards created by shard mutation are + * considered very expired and are taken with priority. + * + * <p>Default value: true
    + */ + private boolean enablePriorityLeaseAssignment = DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT; + /** * Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. * @@ -190,6 +209,29 @@ private BillingMode billingMode = BillingMode.PAY_PER_REQUEST; + /** + * Whether to enable deletion protection on the DynamoDB lease table created by KCL. This does not update + * already existing tables. + * + *
<p>Default value: false + */ + private boolean leaseTableDeletionProtectionEnabled = DEFAULT_LEASE_TABLE_DELETION_PROTECTION_ENABLED; + + /** + * Whether to enable PITR (point in time recovery) on the DynamoDB lease table created by KCL. If true, this can + * update an existing table's PITR. + * + *
<p>Default value: false + */ + private boolean leaseTablePitrEnabled = DEFAULT_LEASE_TABLE_PITR_ENABLED; + + /** + * The list of tags to be applied to the DynamoDB table created for lease management. + * + *
<p>Default value: {@link DefaultSdkAutoConstructList} + */ + private Collection tags = DefaultSdkAutoConstructList.getInstance(); + /** * Frequency (in millis) of the auditor job to scan for partial leases in the lease table. * If the auditor detects any hole in the leases for a stream, then it would trigger shard sync based on @@ -202,7 +244,8 @@ * is inconsistent. If the auditor finds the same set of inconsistencies consecutively for a stream this many times, * then it would trigger a shard sync. */ - private int leasesRecoveryAuditorInconsistencyConfidenceThreshold = DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY; + private int leasesRecoveryAuditorInconsistencyConfidenceThreshold = + DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY; /** * The initial position for getting records from Kinesis streams. @@ -219,8 +262,12 @@ private MetricsFactory metricsFactory = new NullMetricsFactory(); @Deprecated - public LeaseManagementConfig(String tableName, DynamoDbAsyncClient dynamoDBClient, KinesisAsyncClient kinesisClient, - String streamName, String workerIdentifier) { + public LeaseManagementConfig( + String tableName, + DynamoDbAsyncClient dynamoDBClient, + KinesisAsyncClient kinesisClient, + String streamName, + String workerIdentifier) { this.tableName = tableName; this.dynamoDBClient = dynamoDBClient; this.kinesisClient = kinesisClient; @@ -228,7 +275,10 @@ this.workerIdentifier = workerIdentifier; } - public LeaseManagementConfig(String tableName, DynamoDbAsyncClient dynamoDBClient, KinesisAsyncClient kinesisClient, + public LeaseManagementConfig( + String tableName, + DynamoDbAsyncClient dynamoDBClient, + KinesisAsyncClient kinesisClient, String workerIdentifier) { this.tableName = tableName; this.dynamoDBClient = dynamoDBClient; @@ -272,14 +322,20 @@ public class LeaseManagementConfig { * *
<p>Default value: {@link LeaseManagementThreadPool}
    */ - private ExecutorService executorService = new LeaseManagementThreadPool( - new ThreadFactoryBuilder().setNameFormat("ShardSyncTaskManager-%04d").build()); + private ExecutorService executorService = new LeaseManagementThreadPool(new ThreadFactoryBuilder() + .setNameFormat("ShardSyncTaskManager-%04d") + .build()); static class LeaseManagementThreadPool extends ThreadPoolExecutor { private static final long DEFAULT_KEEP_ALIVE_TIME = 60L; LeaseManagementThreadPool(ThreadFactory threadFactory) { - super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE_TIME, TimeUnit.SECONDS, new SynchronousQueue<>(), + super( + 0, + Integer.MAX_VALUE, + DEFAULT_KEEP_ALIVE_TIME, + TimeUnit.SECONDS, + new SynchronousQueue<>(), threadFactory); } } @@ -299,7 +355,7 @@ public class LeaseManagementConfig { private LeaseManagementFactory leaseManagementFactory; public HierarchicalShardSyncer hierarchicalShardSyncer() { - if(hierarchicalShardSyncer == null) { + if (hierarchicalShardSyncer == null) { hierarchicalShardSyncer = new HierarchicalShardSyncer(); } return hierarchicalShardSyncer; @@ -309,7 +365,8 @@ public class LeaseManagementConfig { public LeaseManagementFactory leaseManagementFactory() { if (leaseManagementFactory == null) { Validate.notEmpty(streamName(), "Stream name is empty"); - leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(), + leaseManagementFactory = new DynamoDBLeaseManagementFactory( + kinesisClient(), streamName(), dynamoDBClient(), tableName(), @@ -333,7 +390,10 @@ public class LeaseManagementConfig { initialLeaseTableReadCapacity(), initialLeaseTableWriteCapacity(), hierarchicalShardSyncer(), - tableCreatorCallback(), dynamoDbRequestTimeout(), billingMode()); + tableCreatorCallback(), + dynamoDbRequestTimeout(), + billingMode(), + tags()); } return leaseManagementFactory; } @@ -344,14 +404,17 @@ public class LeaseManagementConfig { * @param isMultiStreamingMode * @return LeaseManagementFactory */ - public LeaseManagementFactory leaseManagementFactory(final LeaseSerializer leaseSerializer, boolean isMultiStreamingMode) { - if(leaseManagementFactory == null) { - leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(), + public LeaseManagementFactory leaseManagementFactory( + final LeaseSerializer leaseSerializer, boolean isMultiStreamingMode) { + if (leaseManagementFactory == null) { + leaseManagementFactory = new DynamoDBLeaseManagementFactory( + kinesisClient(), dynamoDBClient(), tableName(), workerIdentifier(), executorService(), failoverTimeMillis(), + enablePriorityLeaseAssignment(), epsilonMillis(), maxLeasesForWorker(), maxLeasesToStealAtOneTime(), @@ -371,6 +434,9 @@ public class LeaseManagementConfig { tableCreatorCallback(), dynamoDbRequestTimeout(), billingMode(), + leaseTableDeletionProtectionEnabled(), + leaseTablePitrEnabled(), + tags(), leaseSerializer, customShardDetectorProvider(), isMultiStreamingMode, diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java index ecf9b390..9ed77a53 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java @@ -16,6 +16,7 @@ package software.amazon.kinesis.leases; import software.amazon.kinesis.common.StreamConfig; +import software.amazon.kinesis.coordinator.DeletedStreamListProvider; import 
software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; import software.amazon.kinesis.metrics.MetricsFactory; @@ -31,6 +32,13 @@ public interface LeaseManagementFactory { throw new UnsupportedOperationException(); } + default ShardSyncTaskManager createShardSyncTaskManager( + MetricsFactory metricsFactory, + StreamConfig streamConfig, + DeletedStreamListProvider deletedStreamListProvider) { + throw new UnsupportedOperationException("createShardSyncTaskManager method not implemented"); + } + DynamoDBLeaseRefresher createLeaseRefresher(); ShardDetector createShardDetector(); @@ -40,5 +48,4 @@ public interface LeaseManagementFactory { } LeaseCleanupManager createLeaseCleanupManager(MetricsFactory metricsFactory); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java index 7ec5b5ec..c38d442a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java @@ -47,31 +47,30 @@ public interface LeaseRefresher { /** * Creates the table that will store leases. Table is now created in PayPerRequest billing mode by default. * Succeeds if table already exists. - * + * * @return true if we created a new table (table didn't exist before) - * + * * @throws ProvisionedThroughputException if we cannot create the lease table due to per-AWS-account capacity * restrictions. * @throws DependencyException if DynamoDB createTable fails in an unexpected way */ - boolean createLeaseTableIfNotExists() - throws ProvisionedThroughputException, DependencyException; + boolean createLeaseTableIfNotExists() throws ProvisionedThroughputException, DependencyException; /** * @return true if the lease table already exists. - * + * * @throws DependencyException if DynamoDB describeTable fails in an unexpected way */ boolean leaseTableExists() throws DependencyException; /** * Blocks until the lease table exists by polling leaseTableExists. - * + * * @param secondsBetweenPolls time to wait between polls in seconds * @param timeoutSeconds total time to wait in seconds - * + * * @return true if table exists, false if timeout was reached - * + * * @throws DependencyException if DynamoDB describeTable fails in an unexpected way */ boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException; @@ -85,41 +84,41 @@ public interface LeaseRefresher { * * @return list of leases */ - List listLeasesForStream(StreamIdentifier streamIdentifier) throws DependencyException, InvalidStateException, - ProvisionedThroughputException; + List listLeasesForStream(StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * List all objects in table synchronously. - * + * * @throws DependencyException if DynamoDB scan fails in an unexpected way * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan fails due to lack of capacity - * + * * @return list of leases */ List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Create a new lease. Conditional on a lease not already existing with this shardId. 
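The table-lifecycle methods above compose into a simple bootstrap sequence; the sketch below is illustrative, and the class name and polling values are assumptions.

import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;

class LeaseTableBootstrap {
    // Create the lease table if it is absent, then block until it is usable.
    static boolean ensureLeaseTable(LeaseRefresher refresher)
            throws DependencyException, ProvisionedThroughputException {
        refresher.createLeaseTableIfNotExists(); // succeeds if the table already exists
        // poll every 10 seconds, give up after 10 minutes
        return refresher.waitUntilLeaseTableExists(10L, 600L);
    }
}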
- * + * * @param lease the lease to create - * + * * @return true if lease was created, false if lease already exists - * + * * @throws DependencyException if DynamoDB put fails in an unexpected way * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB put fails due to lack of capacity */ boolean createLeaseIfNotExists(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * @param leaseKey Get the lease for this leasekey - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB get fails due to lack of capacity * @throws DependencyException if DynamoDB get fails in an unexpected way - * + * * @return lease for the specified leaseKey, or null if one doesn't exist */ Lease getLease(String leaseKey) throws DependencyException, InvalidStateException, ProvisionedThroughputException; @@ -127,55 +126,53 @@ public interface LeaseRefresher { /** * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter * of the input. Mutates the leaseCounter of the passed-in lease object after updating the record in DynamoDB. - * + * * @param lease the lease to renew - * + * * @return true if renewal succeeded, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean renewLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean renewLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Take a lease for the given owner by incrementing its leaseCounter and setting its owner field. Conditional on * the leaseCounter in DynamoDB matching the leaseCounter of the input. Mutates the leaseCounter and owner of the * passed-in lease object after updating DynamoDB. - * + * * @param lease the lease to take * @param owner the new owner - * + * * @return true if lease was successfully taken, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ boolean takeLease(Lease lease, String owner) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Evict the current owner of lease by setting owner to null. Conditional on the owner in DynamoDB matching the owner of * the input. Mutates the lease counter and owner of the passed-in lease object after updating the record in DynamoDB. 
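Returning to the LeaseManagementConfig hunk earlier in this diff: the new lease-table knobs are plain config fields. The sketch below assumes the class's usual Lombok fluent accessors; the tag key and value are placeholders.

import java.util.Arrays;
import software.amazon.awssdk.services.dynamodb.model.Tag;
import software.amazon.kinesis.leases.LeaseManagementConfig;

class LeaseTableOptionsSketch {
    static LeaseManagementConfig apply(LeaseManagementConfig config) {
        config.enablePriorityLeaseAssignment(true);       // default: true
        config.leaseTableDeletionProtectionEnabled(true); // default: false; applied only when KCL creates the table
        config.leaseTablePitrEnabled(true);               // default: false; may also update an existing table
        config.tags(Arrays.asList(                        // default: empty DefaultSdkAutoConstructList
                Tag.builder().key("team").value("stream-processing").build()));
        return config;
    }
}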
- * + * * @param lease the lease to void - * + * * @return true if eviction succeeded, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean evictLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean evictLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Delete the given lease from DynamoDB. Does nothing when passed a lease that does not exist in DynamoDB. - * + * * @param lease the lease to delete - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB delete fails due to lack of capacity * @throws DependencyException if DynamoDB delete fails in an unexpected way @@ -184,7 +181,7 @@ public interface LeaseRefresher { /** * Delete all leases from DynamoDB. Useful for tools/utils and testing. - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan or delete fail due to lack of capacity * @throws DependencyException if DynamoDB scan or delete fail in an unexpected way @@ -196,15 +193,14 @@ public interface LeaseRefresher { * library such as leaseCounter, leaseOwner, or leaseKey. Conditional on the leaseCounter in DynamoDB matching the * leaseCounter of the input. Increments the lease counter in DynamoDB so that updates can be contingent on other * updates. Mutates the lease counter of the passed-in lease object. - * + * * @return true if update succeeded, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean updateLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean updateLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Update application-specific fields of the given lease in DynamoDB. Does not update fields managed by the leasing @@ -221,9 +217,9 @@ public interface LeaseRefresher { /** * Check (synchronously) if there are any leases in the lease table. - * + * * @return true if there are no leases in the lease table - * + * * @throws DependencyException if DynamoDB scan fails in an unexpected way * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan fails due to lack of capacity diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java index 25ec5b45..61d9643c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java @@ -28,18 +28,18 @@ import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; * that worker. */ public interface LeaseRenewer { - + /** * Bootstrap initial set of leases from the {@link LeaseRefresher} (e.g. 
upon process restart, pick up leases we own) * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table doesn't exist * @throws ProvisionedThroughputException if DynamoDB reads fail due to insufficient capacity */ - void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException; + void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Attempt to renew all currently held leases. - * + * * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table does not exist */ @@ -54,7 +54,7 @@ public interface LeaseRenewer { /** * @param leaseKey key of the lease to retrieve - * + * * @return a deep copy of a currently held lease, or null if we don't hold the lease */ Lease getCurrentlyHeldLease(String leaseKey); @@ -62,7 +62,7 @@ /** * Adds leases to this LeaseRenewer's set of currently held leases. Leases must have lastRenewalNanos set to the * last time the lease counter was incremented before being passed to this method. - * + * * @param newLeases new leases. */ void addLeasesToRenew(Collection newLeases); @@ -74,7 +74,7 @@ /** * Stops the lease renewer from continuing to maintain the given lease. - * + * * @param lease the lease to drop. */ void dropLease(Lease lease); @@ -83,20 +83,19 @@ * Update application-specific fields in a currently held lease. Cannot be used to update internal fields such as * leaseCounter, leaseOwner, etc. Fails if we do not hold the lease, or if the concurrency token does not match * the concurrency token on the internal authoritative copy of the lease (i.e., if we lost and re-acquired the lease). - * + * * @param lease lease object containing updated data * @param concurrencyToken obtained by calling Lease.concurrencyToken for a currently held lease * @param operation that performs updateLease * @param singleStreamShardId shardId for metrics emission in single stream mode. 
MultiStream mode will get the * shardId from the lease object - * + * * @return true if update succeeds, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ boolean updateLease(Lease lease, UUID concurrencyToken, String operation, String singleStreamShardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java index f36f5a66..5d7bea63 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java @@ -16,6 +16,7 @@ package software.amazon.kinesis.leases; import java.util.Collection; import java.util.Map; + import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; @@ -29,7 +30,7 @@ public interface LeaseSerializer { /** * Construct a DynamoDB record out of a Lease object - * + * * @param lease lease object to serialize * @return an attribute value map representing the lease object */ @@ -37,13 +38,12 @@ /** * Construct a Lease object out of a DynamoDB record. - * + * * @param dynamoRecord attribute value map from DynamoDB * @return a deserialized lease object representing the attribute value map */ Lease fromDynamoRecord(Map dynamoRecord); - default Lease fromDynamoRecord(Map dynamoRecord, Lease leaseToUpdate) { throw new UnsupportedOperationException(); } @@ -56,7 +56,7 @@ /** * Special getDynamoHashKey implementation used by {@link LeaseRefresher#getLease(String)}. - * + * * @param leaseKey * @return the attribute value map representing a Lease's hash key given a string. */ @@ -131,5 +131,4 @@ * @return attribute definitions for creating a DynamoDB table to store leases */ Collection getAttributeDefinitions(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java index ead8c195..2d082edb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java @@ -29,14 +29,14 @@ public interface LeaseTaker { /** * Compute the set of leases available to be taken and attempt to take them. Lease taking rules are: - * + * * 1) If a lease's counter hasn't changed for long enough, try to take it. * 2) If we see a lease we've never seen before, take it only if owner == null. If it's owned, odds are the owner is * holding it. We can't tell until we see it more than once. * 3) For load balancing purposes, you may violate rules 1 and 2 for EXACTLY ONE lease per call of takeLeases(). - * + * * @return map of shardId to Lease object for leases we just successfully took. 
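For orientation, one pass of the taker described by these rules reduces to a single call; the wrapper class and the logging below are illustrative only.

import java.util.Map;
import software.amazon.kinesis.leases.Lease;
import software.amazon.kinesis.leases.LeaseTaker;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.leases.exceptions.InvalidStateException;

class TakeLeasesOnce {
    // One iteration of the take loop; the returned map is keyed by shardId.
    static void takeOnce(LeaseTaker taker) throws DependencyException, InvalidStateException {
        Map<String, Lease> taken = taker.takeLeases();
        taken.forEach((shardId, lease) -> System.out.println("Took lease for shard " + shardId));
    }
}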
- * + * * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table does not exist */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java index c8811354..36e108c5 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java @@ -23,8 +23,6 @@ import lombok.Setter; import lombok.experimental.Accessors; import org.apache.commons.lang3.Validate; -import java.util.Objects; - import static com.google.common.base.Verify.verifyNotNull; @Setter @@ -34,8 +32,11 @@ import static com.google.common.base.Verify.verifyNotNull; @EqualsAndHashCode(callSuper = true) public class MultiStreamLease extends Lease { - @NonNull private String streamIdentifier; - @NonNull private String shardId; + @NonNull + private String streamIdentifier; + + @NonNull + private String shardId; public MultiStreamLease(MultiStreamLease other) { super(other); @@ -76,5 +77,4 @@ public class MultiStreamLease extends Lease { Validate.isInstanceOf(MultiStreamLease.class, lease); return (MultiStreamLease) lease; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java index 9b97086d..2944e2d1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java @@ -19,14 +19,12 @@ import java.util.List; /** * Shard Prioritization that returns the same original list of shards without any modifications. */ -public class NoOpShardPrioritization implements - ShardPrioritization { +public class NoOpShardPrioritization implements ShardPrioritization { /** * Empty constructor for NoOp Shard Prioritization. */ - public NoOpShardPrioritization() { - } + public NoOpShardPrioritization() {} @Override public List prioritize(List original) { diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java index b5796d96..02232ab2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java @@ -25,8 +25,7 @@ * It also limits the number of shards that will be available for initialization based on their depth. * It doesn't make a lot of sense to work on a shard that has too many unfinished parents. */ -public class ParentsFirstShardPrioritization implements - ShardPrioritization { +public class ParentsFirstShardPrioritization implements ShardPrioritization { private static final SortingNode PROCESSING_NODE = new SortingNode(null, Integer.MIN_VALUE); private final int maxDepth; @@ -34,13 +33,13 @@ /** * Creates ParentsFirst prioritization with filtering based on depth of the shard. * Shards that have depth > maxDepth will be ignored and will not be returned by this prioritization. 
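A short usage sketch of this prioritization follows; the maxDepth of 2 is an arbitrary illustration.

import java.util.List;
import software.amazon.kinesis.leases.ParentsFirstShardPrioritization;
import software.amazon.kinesis.leases.ShardInfo;

class PrioritizationSketch {
    // Orders shards so parents come before children and drops any shard
    // whose depth in the parent hierarchy exceeds maxDepth (here 2).
    static List<ShardInfo> order(List<ShardInfo> shards) {
        return new ParentsFirstShardPrioritization(2).prioritize(shards);
    }
}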
- + * @param maxDepth any shard that is deeper than max depth will be excluded from processing */ public ParentsFirstShardPrioritization(int maxDepth) { /* Depth 0 means that shard is completed or cannot be found, - * it is impossible to process such shards. - */ + * it is impossible to process such shards. + */ if (maxDepth <= 0) { throw new IllegalArgumentException("Max depth cannot be negative or zero. Provided value: " + maxDepth); } @@ -51,16 +50,13 @@ public List prioritize(List original) { Map shards = new HashMap<>(); for (ShardInfo shardInfo : original) { - shards.put(shardInfo.shardId(), - shardInfo); + shards.put(shardInfo.shardId(), shardInfo); } Map processedNodes = new HashMap<>(); for (ShardInfo shardInfo : original) { - populateDepth(shardInfo.shardId(), - shards, - processedNodes); + populateDepth(shardInfo.shardId(), shards, processedNodes); } List orderedInfos = new ArrayList<>(original.size()); @@ -77,14 +73,12 @@ return orderedInfos; } - private int populateDepth(String shardId, - Map shards, - Map processedNodes) { + private int populateDepth(String shardId, Map shards, Map processedNodes) { SortingNode processed = processedNodes.get(shardId); if (processed != null) { if (processed == PROCESSING_NODE) { - throw new IllegalArgumentException("Circular dependency detected. Shard Id " - + shardId + " is processed twice"); + throw new IllegalArgumentException( + "Circular dependency detected. Shard Id " + shardId + " is processed twice"); } return processed.getDepth(); } @@ -105,16 +99,11 @@ int maxParentDepth = 0; for (String parentId : shardInfo.parentShardIds()) { - maxParentDepth = Math.max(maxParentDepth, - populateDepth(parentId, - shards, - processedNodes)); + maxParentDepth = Math.max(maxParentDepth, populateDepth(parentId, shards, processedNodes)); } int currentNodeLevel = maxParentDepth + 1; - SortingNode previousValue = processedNodes.put(shardId, - new SortingNode(shardInfo, - currentNodeLevel)); + SortingNode previousValue = processedNodes.put(shardId, new SortingNode(shardInfo, currentNodeLevel)); if (previousValue != PROCESSING_NODE) { throw new IllegalStateException("Validation failed. Depth for shardId " + shardId + " was populated twice"); } @@ -125,13 +114,11 @@ /** * Class to store depth of shards during prioritization. 
*/ - private static class SortingNode implements - Comparable { + private static class SortingNode implements Comparable { private final ShardInfo shardInfo; private final int depth; - public SortingNode(ShardInfo shardInfo, - int depth) { + public SortingNode(ShardInfo shardInfo, int depth) { this.shardInfo = shardInfo; this.depth = depth; } @@ -142,8 +129,7 @@ @Override public int compareTo(SortingNode o) { - return Integer.compare(depth, - o.depth); + return Integer.compare(depth, o.depth); } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java index 62b93855..7eb065eb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java @@ -46,10 +46,20 @@ public interface ShardDetector { */ List listShards(); + /** + * This method behaves exactly like listShards, except that it does not consume ResourceNotFoundException; + * the exception is thrown to the caller instead of returning an empty list. + * + * @return Shards + */ + default List listShardsWithoutConsumingResourceNotFoundException() { + throw new UnsupportedOperationException("listShardsWithoutConsumingResourceNotFoundException not implemented"); + } + /** * List shards with shard filter. * - * @param ShardFilter + * @param shardFilter * @return Shards */ default List listShardsWithFilter(ShardFilter shardFilter) { @@ -83,7 +93,8 @@ * @throws ExecutionException * @throws TimeoutException */ - default List getChildShards(String shardId) throws InterruptedException, ExecutionException, TimeoutException { + default List getChildShards(String shardId) + throws InterruptedException, ExecutionException, TimeoutException { throw new UnsupportedOperationException("getChildShards not available."); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java index aff3f6f0..bb59cbb1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java @@ -20,13 +20,12 @@ import java.util.LinkedList; import java.util.List; import java.util.Optional; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - import lombok.Getter; import lombok.NonNull; import lombok.ToString; import lombok.experimental.Accessors; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; /** @@ -46,7 +45,7 @@ public class ShardInfo { /** * Creates a new ShardInfo object. The checkpoint is not part of the equality, but is used for debugging output. 
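The ShardDetector addition above moves the ResourceNotFoundException decision to the caller. Below is a hedged sketch of such a caller; the class name and the onStreamDeleted hook are illustrative.

import java.util.Collections;
import java.util.List;
import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.services.kinesis.model.Shard;
import software.amazon.kinesis.leases.ShardDetector;

class DeletedStreamAwareListing {
    // Unlike listShards(), the new variant propagates ResourceNotFoundException,
    // so the caller can distinguish "stream deleted" from "stream has no shards".
    static List<Shard> listOrFlagDeleted(ShardDetector detector, Runnable onStreamDeleted) {
        try {
            return detector.listShardsWithoutConsumingResourceNotFoundException();
        } catch (ResourceNotFoundException e) {
            onStreamDeleted.run(); // e.g. record the stream in a DeletedStreamListProvider
            return Collections.emptyList();
        }
    }
}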
- * + * * @param shardId * Kinesis shardId that this will be about * @param concurrencyToken @@ -56,7 +55,8 @@ public class ShardInfo { * @param checkpoint * the latest checkpoint from lease */ - public ShardInfo(@NonNull final String shardId, + public ShardInfo( + @NonNull final String shardId, final String concurrencyToken, final Collection parentShardIds, final ExtendedSequenceNumber checkpoint) { @@ -72,7 +72,8 @@ public class ShardInfo { * @param checkpoint * @param streamIdentifierSer */ - public ShardInfo(@NonNull final String shardId, + public ShardInfo( + @NonNull final String shardId, final String concurrencyToken, final Collection parentShardIds, final ExtendedSequenceNumber checkpoint, @@ -92,7 +93,7 @@ public class ShardInfo { /** * A list of shards that are parents of this shard. This may be empty if the shard has no parents. - * + * * @return a list of shardId's that are parents of this shard, or empty if the shard has no parents. */ public List parentShardIds() { @@ -114,7 +115,11 @@ public class ShardInfo { @Override public int hashCode() { return new HashCodeBuilder() - .append(concurrencyToken).append(parentShardIds).append(shardId).append(streamIdentifierSerOpt.orElse("")).toHashCode(); + .append(concurrencyToken) + .append(parentShardIds) + .append(shardId) + .append(streamIdentifierSerOpt.orElse("")) + .toHashCode(); } /** @@ -137,10 +142,12 @@ public class ShardInfo { return false; } ShardInfo other = (ShardInfo) obj; - return new EqualsBuilder().append(concurrencyToken, other.concurrencyToken) - .append(parentShardIds, other.parentShardIds).append(shardId, other.shardId) - .append(streamIdentifierSerOpt.orElse(""), other.streamIdentifierSerOpt.orElse("")).isEquals(); - + return new EqualsBuilder() + .append(concurrencyToken, other.concurrencyToken) + .append(parentShardIds, other.parentShardIds) + .append(shardId, other.shardId) + .append(streamIdentifierSerOpt.orElse(""), other.streamIdentifierSerOpt.orElse("")) + .isEquals(); } /** @@ -159,9 +166,9 @@ public class ShardInfo { * @return lease key */ public static String getLeaseKey(ShardInfo shardInfo, String shardIdOverride) { - return shardInfo.streamIdentifierSerOpt().isPresent() ? - MultiStreamLease.getLeaseKey(shardInfo.streamIdentifierSerOpt().get(), shardIdOverride) : - shardIdOverride; + return shardInfo.streamIdentifierSerOpt().isPresent() + ? MultiStreamLease.getLeaseKey( + shardInfo.streamIdentifierSerOpt().get(), shardIdOverride) + : shardIdOverride; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java index dc99d413..934b454a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java @@ -24,7 +24,7 @@ public interface ShardPrioritization { /** * Returns new list of shards ordered based on their priority. 
* The resulting list may have fewer shards than the original list - * + * * @param original * list of shards needed to be prioritized * @return new list that contains only shards that should be processed diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java index dd576114..be4fbf51 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java @@ -37,20 +37,25 @@ import software.amazon.kinesis.metrics.MetricsUtil; @Slf4j @KinesisClientInternalApi public class ShardSyncTask implements ConsumerTask { - private final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask"; + private static final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask"; @NonNull private final ShardDetector shardDetector; + @NonNull private final LeaseRefresher leaseRefresher; + @NonNull private final InitialPositionInStreamExtended initialPosition; + private final boolean cleanupLeasesUponShardCompletion; private final boolean garbageCollectLeases; private final boolean ignoreUnexpectedChildShards; private final long shardSyncTaskIdleTimeMillis; + @NonNull private final HierarchicalShardSyncer hierarchicalShardSyncer; + @NonNull private final MetricsFactory metricsFactory; @@ -67,8 +72,12 @@ public class ShardSyncTask implements ConsumerTask { boolean shardSyncSuccess = true; try { - boolean didPerformShardSync = hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, - initialPosition, scope, ignoreUnexpectedChildShards, + boolean didPerformShardSync = hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + leaseRefresher, + initialPosition, + scope, + ignoreUnexpectedChildShards, leaseRefresher.isLeaseTableEmpty()); if (didPerformShardSync && shardSyncTaskIdleTimeMillis > 0) { @@ -80,7 +89,8 @@ shardSyncSuccess = false; } finally { // NOTE: This metric reflects whether a shard sync task succeeds. Customers can use this metric to monitor if - // their application encounter any shard sync failures. This metric can help to detect potential shard stuck issues + // their application encounters any shard sync failures. This metric can help to detect potential shard stuck + // issues 
MetricsUtil.addSuccess(scope, "SyncShards", shardSyncSuccess, MetricsLevel.DETAILED); MetricsUtil.endScope(scope); @@ -97,5 +107,4 @@ public class ShardSyncTask implements ConsumerTask { public TaskType taskType() { return taskType; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java index e03046a0..add8cf4f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java @@ -20,17 +20,16 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantLock; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - import lombok.Data; import lombok.NonNull; import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.coordinator.ExecutorStateEvent; import software.amazon.kinesis.lifecycle.ConsumerTask; import software.amazon.kinesis.lifecycle.TaskResult; -import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; +import software.amazon.kinesis.metrics.MetricsFactory; /** * The ShardSyncTaskManager is used to track the task to sync shards with leases (create leases for new @@ -43,20 +42,27 @@ import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; public class ShardSyncTaskManager { @NonNull private final ShardDetector shardDetector; + @NonNull private final LeaseRefresher leaseRefresher; + @NonNull private final InitialPositionInStreamExtended initialPositionInStream; + private final boolean cleanupLeasesUponShardCompletion; private final boolean garbageCollectLeases; private final boolean ignoreUnexpectedChildShards; private final long shardSyncIdleTimeMillis; + @NonNull private final ExecutorService executorService; + @NonNull private final HierarchicalShardSyncer hierarchicalShardSyncer; + @NonNull private final MetricsFactory metricsFactory; + private ConsumerTask currentTask; private CompletableFuture future; private AtomicBoolean shardSyncRequestPending; @@ -77,9 +83,14 @@ public class ShardSyncTaskManager { * @param metricsFactory */ @Deprecated - public ShardSyncTaskManager(ShardDetector shardDetector, LeaseRefresher leaseRefresher, - InitialPositionInStreamExtended initialPositionInStream, boolean cleanupLeasesUponShardCompletion, - boolean ignoreUnexpectedChildShards, long shardSyncIdleTimeMillis, ExecutorService executorService, + public ShardSyncTaskManager( + ShardDetector shardDetector, + LeaseRefresher leaseRefresher, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesUponShardCompletion, + boolean ignoreUnexpectedChildShards, + long shardSyncIdleTimeMillis, + ExecutorService executorService, MetricsFactory metricsFactory) { this.shardDetector = shardDetector; this.leaseRefresher = leaseRefresher; @@ -108,10 +119,16 @@ public class ShardSyncTaskManager { * @param hierarchicalShardSyncer * @param metricsFactory */ - public ShardSyncTaskManager(ShardDetector shardDetector, LeaseRefresher leaseRefresher, - InitialPositionInStreamExtended initialPositionInStream, boolean cleanupLeasesUponShardCompletion, - boolean ignoreUnexpectedChildShards, long shardSyncIdleTimeMillis, 
ExecutorService executorService, - HierarchicalShardSyncer hierarchicalShardSyncer, MetricsFactory metricsFactory) { + public ShardSyncTaskManager( + ShardDetector shardDetector, + LeaseRefresher leaseRefresher, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesUponShardCompletion, + boolean ignoreUnexpectedChildShards, + long shardSyncIdleTimeMillis, + ExecutorService executorService, + HierarchicalShardSyncer hierarchicalShardSyncer, + MetricsFactory metricsFactory) { this.shardDetector = shardDetector; this.leaseRefresher = leaseRefresher; this.initialPositionInStream = initialPositionInStream; @@ -131,15 +148,16 @@ public class ShardSyncTaskManager { * @return the Task Result. */ public TaskResult callShardSyncTask() { - final ShardSyncTask shardSyncTask = new ShardSyncTask(shardDetector, - leaseRefresher, - initialPositionInStream, - cleanupLeasesUponShardCompletion, - garbageCollectLeases, - ignoreUnexpectedChildShards, - shardSyncIdleTimeMillis, - hierarchicalShardSyncer, - metricsFactory); + final ShardSyncTask shardSyncTask = new ShardSyncTask( + shardDetector, + leaseRefresher, + initialPositionInStream, + cleanupLeasesUponShardCompletion, + garbageCollectLeases, + ignoreUnexpectedChildShards, + shardSyncIdleTimeMillis, + hierarchicalShardSyncer, + metricsFactory); final ConsumerTask metricCollectingTask = new MetricsCollectingTaskDecorator(shardSyncTask, metricsFactory); return metricCollectingTask.call(); } @@ -164,28 +182,27 @@ public class ShardSyncTaskManager { try { TaskResult result = future.get(); if (result.getException() != null) { - log.error("Caught exception running {} task: ", currentTask.taskType(), - result.getException()); + log.error("Caught exception running {} task: ", currentTask.taskType(), result.getException()); } } catch (InterruptedException | ExecutionException e) { log.warn("{} task encountered exception.", currentTask.taskType(), e); } } - currentTask = - new MetricsCollectingTaskDecorator( - new ShardSyncTask(shardDetector, - leaseRefresher, - initialPositionInStream, - cleanupLeasesUponShardCompletion, - garbageCollectLeases, - ignoreUnexpectedChildShards, - shardSyncIdleTimeMillis, - hierarchicalShardSyncer, - metricsFactory), - metricsFactory); + currentTask = new MetricsCollectingTaskDecorator( + new ShardSyncTask( + shardDetector, + leaseRefresher, + initialPositionInStream, + cleanupLeasesUponShardCompletion, + garbageCollectLeases, + ignoreUnexpectedChildShards, + shardSyncIdleTimeMillis, + hierarchicalShardSyncer, + metricsFactory), + metricsFactory); future = CompletableFuture.supplyAsync(() -> currentTask.call(), executorService) - .whenComplete((taskResult, exception) -> handlePendingShardSyncs(exception, taskResult)); + .whenComplete((taskResult, exception) -> handlePendingShardSyncs(exception, taskResult)); log.info(new ExecutorStateEvent(executorService).message()); @@ -195,8 +212,10 @@ public class ShardSyncTaskManager { } } else { if (log.isDebugEnabled()) { - log.debug("Previous {} task still pending. Not submitting new task. " - + "Enqueued a request that will be executed when the current request completes.", currentTask.taskType()); + log.debug( + "Previous {} task still pending. Not submitting new task. 
" + + "Triggered a pending request but will not be executed until the current request completes.", + currentTask.taskType()); } shardSyncRequestPending.compareAndSet(false /*expected*/, true /*update*/); } @@ -205,27 +224,10 @@ public class ShardSyncTaskManager { private void handlePendingShardSyncs(Throwable exception, TaskResult taskResult) { if (exception != null || taskResult.getException() != null) { - log.error("Caught exception running {} task: ", currentTask.taskType(), exception != null ? exception : taskResult.getException()); - } - // Acquire lock here. If shardSyncRequestPending is false in this completionStage and - // submitShardSyncTask is invoked, before completion stage exits (future completes) - // but right after the value of shardSyncRequestPending is checked, it will result in - // shardSyncRequestPending being set to true, but no pending futures to trigger the next - // ShardSyncTask. By executing this stage in a Reentrant lock, we ensure that if the - // previous task is in this completion stage, checkAndSubmitNextTask is not invoked - // until this completionStage exits. - try { - lock.lock(); - if (shardSyncRequestPending.get()) { - shardSyncRequestPending.set(false); - // reset future to null, so next call creates a new one - // without trying to get results from the old future. - future = null; - checkAndSubmitNextTask(); - } - } finally { - lock.unlock(); + log.error( + "Caught exception running {} task: {}", + currentTask.taskType(), + exception != null ? exception : taskResult.getException()); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java index 9461a18e..4ce3f1a9 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java @@ -22,5 +22,6 @@ package software.amazon.kinesis.leases; * for backfilling while rolling forward to newer versions. 
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java
index 9461a18e..4ce3f1a9 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java
@@ -22,5 +22,6 @@ package software.amazon.kinesis.leases;
  * for backfilling while rolling forward to newer versions.
  */
 public enum UpdateField {
-    CHILD_SHARDS, HASH_KEY_RANGE
+    CHILD_SHARDS,
+    HASH_KEY_RANGE
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java
index 07e9068d..bef76ef0 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java
@@ -14,8 +14,6 @@
  */
 package software.amazon.kinesis.leases.dynamodb;

-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -30,10 +28,13 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import lombok.extern.slf4j.Slf4j;
 import software.amazon.kinesis.annotations.KinesisClientInternalApi;
 import software.amazon.kinesis.leases.Lease;
 import software.amazon.kinesis.leases.LeaseCoordinator;
+import software.amazon.kinesis.leases.LeaseManagementConfig;
 import software.amazon.kinesis.leases.LeaseRefresher;
 import software.amazon.kinesis.leases.LeaseRenewer;
 import software.amazon.kinesis.leases.LeaseTaker;
@@ -62,9 +63,13 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {
     // Time to wait for in-flight Runnables to finish when calling .stop();
     private static final long STOP_WAIT_TIME_MILLIS = 2000L;
     private static final ThreadFactory LEASE_COORDINATOR_THREAD_FACTORY = new ThreadFactoryBuilder()
-            .setNameFormat("LeaseCoordinator-%04d").setDaemon(true).build();
+            .setNameFormat("LeaseCoordinator-%04d")
+            .setDaemon(true)
+            .build();
     private static final ThreadFactory LEASE_RENEWAL_THREAD_FACTORY = new ThreadFactoryBuilder()
-            .setNameFormat("LeaseRenewer-%04d").setDaemon(true).build();
+            .setNameFormat("LeaseRenewer-%04d")
+            .setDaemon(true)
+            .build();

     private final LeaseRenewer leaseRenewer;
     private final LeaseTaker leaseTaker;
@@ -104,18 +109,26 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {
      *            Used to publish metrics about lease operations
      */
     @Deprecated
-    public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher,
-            final String workerIdentifier,
-            final long leaseDurationMillis,
-            final long epsilonMillis,
-            final int maxLeasesForWorker,
-            final int maxLeasesToStealAtOneTime,
-            final int maxLeaseRenewerThreadCount,
-            final MetricsFactory metricsFactory) {
-        this(leaseRefresher, workerIdentifier, leaseDurationMillis, epsilonMillis, maxLeasesForWorker,
-                maxLeasesToStealAtOneTime, maxLeaseRenewerThreadCount,
+    public DynamoDBLeaseCoordinator(
+            final LeaseRefresher leaseRefresher,
+            final String workerIdentifier,
+            final long leaseDurationMillis,
+            final long epsilonMillis,
+            final int maxLeasesForWorker,
+            final int maxLeasesToStealAtOneTime,
+            final int maxLeaseRenewerThreadCount,
+            final MetricsFactory metricsFactory) {
+        this(
+                leaseRefresher,
+                workerIdentifier,
+                leaseDurationMillis,
+                epsilonMillis,
+                maxLeasesForWorker,
+                maxLeasesToStealAtOneTime,
+                maxLeaseRenewerThreadCount,
                 TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY,
-                TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory);
+                TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY,
+                metricsFactory);
     }

     /**
@@ -140,21 +153,74 @@ public class DynamoDBLeaseCoordinator {
      * @param metricsFactory
      *            Used to publish metrics about lease operations
      */
-    public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher,
-            final String workerIdentifier,
-            final long leaseDurationMillis,
-            final long epsilonMillis,
-            final int maxLeasesForWorker,
-            final int maxLeasesToStealAtOneTime,
-            final int maxLeaseRenewerThreadCount,
-            final long initialLeaseTableReadCapacity,
-            final long initialLeaseTableWriteCapacity,
-            final MetricsFactory metricsFactory) {
+    @Deprecated
+    public DynamoDBLeaseCoordinator(
+            final LeaseRefresher leaseRefresher,
+            final String workerIdentifier,
+            final long leaseDurationMillis,
+            final long epsilonMillis,
+            final int maxLeasesForWorker,
+            final int maxLeasesToStealAtOneTime,
+            final int maxLeaseRenewerThreadCount,
+            final long initialLeaseTableReadCapacity,
+            final long initialLeaseTableWriteCapacity,
+            final MetricsFactory metricsFactory) {
+        this(
+                leaseRefresher,
+                workerIdentifier,
+                leaseDurationMillis,
+                LeaseManagementConfig.DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT,
+                epsilonMillis,
+                maxLeasesForWorker,
+                maxLeasesToStealAtOneTime,
+                maxLeaseRenewerThreadCount,
+                TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY,
+                TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY,
+                metricsFactory);
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param leaseRefresher
+     *            LeaseRefresher instance to use
+     * @param workerIdentifier
+     *            Identifies the worker (e.g. useful to track lease ownership)
+     * @param leaseDurationMillis
+     *            Duration of a lease
+     * @param enablePriorityLeaseAssignment
+     *            Whether to enable priority lease assignment for very expired leases
+     * @param epsilonMillis
+     *            Allow for some variance when calculating lease expirations
+     * @param maxLeasesForWorker
+     *            Max leases this Worker can handle at a time
+     * @param maxLeasesToStealAtOneTime
+     *            Steal up to this many leases at a time (for load balancing)
+     * @param maxLeaseRenewerThreadCount
+     *            Max number of threads with which to renew leases in parallel
+     * @param initialLeaseTableReadCapacity
+     *            Initial dynamodb lease table read iops if creating the lease table
+     * @param initialLeaseTableWriteCapacity
+     *            Initial dynamodb lease table write iops if creating the lease table
+     * @param metricsFactory
+     *            Used to publish metrics about lease operations
+     */
+    public DynamoDBLeaseCoordinator(
+            final LeaseRefresher leaseRefresher,
+            final String workerIdentifier,
+            final long leaseDurationMillis,
+            final boolean enablePriorityLeaseAssignment,
+            final long epsilonMillis,
+            final int maxLeasesForWorker,
+            final int maxLeasesToStealAtOneTime,
+            final int maxLeaseRenewerThreadCount,
+            final long initialLeaseTableReadCapacity,
+            final long initialLeaseTableWriteCapacity,
+            final MetricsFactory metricsFactory) {
         this.leaseRefresher = leaseRefresher;
         this.leaseRenewalThreadpool = getLeaseRenewalExecutorService(maxLeaseRenewerThreadCount);
         this.leaseTaker = new DynamoDBLeaseTaker(leaseRefresher, workerIdentifier, leaseDurationMillis, metricsFactory)
                 .withMaxLeasesForWorker(maxLeasesForWorker)
-                .withMaxLeasesToStealAtOneTime(maxLeasesToStealAtOneTime);
+                .withMaxLeasesToStealAtOneTime(maxLeasesToStealAtOneTime)
+                .withEnablePriorityLeaseAssignment(enablePriorityLeaseAssignment);
         this.leaseRenewer = new DynamoDBLeaseRenewer(
                 leaseRefresher, workerIdentifier, leaseDurationMillis, leaseRenewalThreadpool, metricsFactory);
         this.renewerIntervalMillis = getRenewerTakerIntervalMillis(leaseDurationMillis, epsilonMillis);
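The new enablePriorityLeaseAssignment flag above is threaded into DynamoDBLeaseTaker via withEnablePriorityLeaseAssignment. A construction sketch against the new eleven-argument signature; leaseRefresher and metricsFactory are assumed to exist in scope, and the numeric values are illustrative only:

    LeaseCoordinator coordinator = new DynamoDBLeaseCoordinator(
            leaseRefresher,
            "worker-1",
            10_000L,             // leaseDurationMillis (failover time)
            true,                // enablePriorityLeaseAssignment: favor very expired leases
            25L,                 // epsilonMillis
            Integer.MAX_VALUE,   // maxLeasesForWorker
            1,                   // maxLeasesToStealAtOneTime
            20,                  // maxLeaseRenewerThreadCount
            TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY,
            TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY,
            metricsFactory);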
@@ -169,7 +235,8 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {
         this.initialLeaseTableWriteCapacity = initialLeaseTableWriteCapacity;
         this.metricsFactory = metricsFactory;

-        log.info("With failover time {} ms and epsilon {} ms, LeaseCoordinator will renew leases every {} ms, take"
+        log.info(
+                "With failover time {} ms and epsilon {} ms, LeaseCoordinator will renew leases every {} ms, take "
                         + "leases every {} ms, process maximum of {} leases and steal {} lease(s) at a time.",
                 leaseDurationMillis,
                 epsilonMillis,
@@ -191,7 +258,6 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {
                 log.error("Throwable encountered in lease taking thread", t);
             }
         }
-
     }

     private class RenewerRunnable implements Runnable {
@@ -206,13 +272,11 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {
                 log.error("Throwable encountered in lease renewing thread", t);
             }
         }
-
     }

     @Override
     public void initialize() throws ProvisionedThroughputException, DependencyException, IllegalStateException {
-        final boolean newTableCreated =
-                leaseRefresher.createLeaseTableIfNotExists();
+        final boolean newTableCreated = leaseRefresher.createLeaseTableIfNotExists();
         if (newTableCreated) {
             log.info("Created new lease table for coordinator with pay per request billing mode.");
         }
@@ -233,15 +297,11 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {
         leaseCoordinatorThreadPool = Executors.newScheduledThreadPool(2, LEASE_COORDINATOR_THREAD_FACTORY);

         // Taker runs with fixed DELAY because we want it to run slower in the event of performance degradation.
-        takerFuture = leaseCoordinatorThreadPool.scheduleWithFixedDelay(new TakerRunnable(),
-                0L,
-                takerIntervalMillis,
-                TimeUnit.MILLISECONDS);
+        takerFuture = leaseCoordinatorThreadPool.scheduleWithFixedDelay(
+                new TakerRunnable(), 0L, takerIntervalMillis, TimeUnit.MILLISECONDS);
         // Renewer runs at fixed INTERVAL because we want it to run at the same rate in the event of degradation.
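        // With ScheduledExecutorService, fixed delay waits the full interval after each run completes,
        // so a slow taker naturally backs off, while fixed rate targets a constant period between run
        // starts, keeping renewals on cadence even when an individual run is slow.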
- leaseCoordinatorThreadPool.scheduleAtFixedRate(new RenewerRunnable(), - 0L, - renewerIntervalMillis, - TimeUnit.MILLISECONDS); + leaseCoordinatorThreadPool.scheduleAtFixedRate( + new RenewerRunnable(), 0L, renewerIntervalMillis, TimeUnit.MILLISECONDS); running = true; } @@ -305,11 +365,13 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { leaseCoordinatorThreadPool.shutdown(); try { if (leaseCoordinatorThreadPool.awaitTermination(STOP_WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS)) { - log.info("Worker {} has successfully stopped lease-tracking threads", + log.info( + "Worker {} has successfully stopped lease-tracking threads", leaseTaker.getWorkerIdentifier()); } else { leaseCoordinatorThreadPool.shutdownNow(); - log.info("Worker {} stopped lease-tracking threads {} ms after stop", + log.info( + "Worker {} stopped lease-tracking threads {} ms after stop", leaseTaker.getWorkerIdentifier(), STOP_WAIT_TIME_MILLIS); } @@ -329,8 +391,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { @Override public void stopLeaseTaker() { - takerFuture.cancel(false); - + if (takerFuture != null) { + takerFuture.cancel(false); + } } @Override @@ -348,8 +411,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { } @Override - public boolean updateLease(final Lease lease, final UUID concurrencyToken, final String operation, - final String singleStreamShardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public boolean updateLease( + final Lease lease, final UUID concurrencyToken, final String operation, final String singleStreamShardId) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { return leaseRenewer.updateLease(lease, concurrencyToken, operation, singleStreamShardId); } @@ -361,8 +425,13 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { private static ExecutorService getLeaseRenewalExecutorService(int maximumPoolSize) { int coreLeaseCount = Math.max(maximumPoolSize / 4, 2); - return new ThreadPoolExecutor(coreLeaseCount, maximumPoolSize, 60, TimeUnit.SECONDS, - new LinkedTransferQueue<>(), LEASE_RENEWAL_THREAD_FACTORY); + return new ThreadPoolExecutor( + coreLeaseCount, + maximumPoolSize, + 60, + TimeUnit.SECONDS, + new LinkedTransferQueue<>(), + LEASE_RENEWAL_THREAD_FACTORY); } @Override @@ -375,7 +444,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { if (leases == null) { return Collections.emptyList(); } - return leases.stream().map(DynamoDBLeaseCoordinator::convertLeaseToAssignment).collect(Collectors.toList()); + return leases.stream() + .map(DynamoDBLeaseCoordinator::convertLeaseToAssignment) + .collect(Collectors.toList()); } /** @@ -385,11 +456,15 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { */ public static ShardInfo convertLeaseToAssignment(final Lease lease) { if (lease instanceof MultiStreamLease) { - return new ShardInfo(((MultiStreamLease) lease).shardId(), lease.concurrencyToken().toString(), lease.parentShardIds(), - lease.checkpoint(), ((MultiStreamLease) lease).streamIdentifier()); + return new ShardInfo( + ((MultiStreamLease) lease).shardId(), + lease.concurrencyToken().toString(), + lease.parentShardIds(), + lease.checkpoint(), + ((MultiStreamLease) lease).streamIdentifier()); } else { - return new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), lease.parentShardIds(), - lease.checkpoint()); + return new ShardInfo( + lease.leaseKey(), 
lease.concurrencyToken().toString(), lease.parentShardIds(), lease.checkpoint()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java index ad1a2300..e5435bfc 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java @@ -16,19 +16,24 @@ package software.amazon.kinesis.leases.dynamodb; import java.time.Duration; +import java.util.Collection; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.function.Function; + import lombok.Data; import lombok.NonNull; +import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.BillingMode; +import software.amazon.awssdk.services.dynamodb.model.Tag; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.LeaseCleanupConfig; import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.coordinator.DeletedStreamListProvider; import software.amazon.kinesis.leases.HierarchicalShardSyncer; import software.amazon.kinesis.leases.KinesisShardDetector; import software.amazon.kinesis.leases.LeaseCleanupManager; @@ -49,24 +54,32 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { @NonNull private final KinesisAsyncClient kinesisClient; + @NonNull private final DynamoDbAsyncClient dynamoDBClient; + @NonNull private final String tableName; + @NonNull private final String workerIdentifier; + @NonNull private final ExecutorService executorService; + @NonNull private final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer; + @NonNull private final LeaseSerializer leaseSerializer; + @NonNull private StreamConfig streamConfig; private Function customShardDetectorProvider; private final long failoverTimeMillis; + private final boolean enablePriorityLeaseAssignment; private final long epsilonMillis; private final int maxLeasesForWorker; private final int maxLeasesToStealAtOneTime; @@ -85,6 +98,9 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { private final TableCreatorCallback tableCreatorCallback; private final Duration dynamoDbRequestTimeout; private final BillingMode billingMode; + private final boolean leaseTableDeletionProtectionEnabled; + private final boolean leaseTablePitrEnabled; + private final Collection tags; private final boolean isMultiStreamMode; private final LeaseCleanupConfig leaseCleanupConfig; @@ -116,21 +132,51 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param cacheMissWarningModulus */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long 
failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus) { + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY); } @@ -166,23 +212,56 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param initialLeaseTableWriteCapacity */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int 
cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - new HierarchicalShardSyncer(), TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity) { + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + new HierarchicalShardSyncer(), + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } @@ -216,24 +295,59 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param tableCreatorCallback */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, final 
TableCreatorCallback tableCreatorCallback) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback) { + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } /** @@ -267,25 +381,61 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param dynamoDbRequestTimeout */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer 
hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, BillingMode.PAY_PER_REQUEST); + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + BillingMode.PAY_PER_REQUEST); } /** @@ -320,26 +470,156 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param billingMode */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long 
initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode) { + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode) { - this(kinesisClient, new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, new DynamoDBLeaseSerializer()); + this( + kinesisClient, + new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + new DynamoDBLeaseSerializer()); + } + + /** + * Constructor. 
+ * + * @param kinesisClient + * @param streamName + * @param dynamoDBClient + * @param tableName + * @param workerIdentifier + * @param executorService + * @param initialPositionInStream + * @param failoverTimeMillis + * @param epsilonMillis + * @param maxLeasesForWorker + * @param maxLeasesToStealAtOneTime + * @param maxLeaseRenewalThreads + * @param cleanupLeasesUponShardCompletion + * @param ignoreUnexpectedChildShards + * @param shardSyncIntervalMillis + * @param consistentReads + * @param listShardsBackoffTimeMillis + * @param maxListShardsRetryAttempts + * @param maxCacheMissesBeforeReload + * @param listShardsCacheAllowedAgeInSeconds + * @param cacheMissWarningModulus + * @param initialLeaseTableReadCapacity + * @param initialLeaseTableWriteCapacity + * @param hierarchicalShardSyncer + * @param tableCreatorCallback + * @param dynamoDbRequestTimeout + * @param billingMode + * @param tags + */ + @Deprecated + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + Collection tags) { + + this( + kinesisClient, + new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + new DynamoDBLeaseSerializer()); } /** @@ -372,25 +652,162 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param dynamoDbRequestTimeout * @param billingMode */ - private DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final StreamConfig streamConfig, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final long failoverTimeMillis, final long epsilonMillis, - final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final 
boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, LeaseSerializer leaseSerializer) { - this(kinesisClient, dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, leaseSerializer, - null, false, LeaseManagementConfig.DEFAULT_LEASE_CLEANUP_CONFIG); + @Deprecated + private DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final StreamConfig streamConfig, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + LeaseSerializer leaseSerializer) { + this( + kinesisClient, + streamConfig, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + LeaseManagementConfig.DEFAULT_LEASE_TABLE_DELETION_PROTECTION_ENABLED, + DefaultSdkAutoConstructList.getInstance(), + leaseSerializer); + } + + /** + * Constructor. 
+ * + * @param kinesisClient + * @param streamConfig + * @param dynamoDBClient + * @param tableName + * @param workerIdentifier + * @param executorService + * @param failoverTimeMillis + * @param epsilonMillis + * @param maxLeasesForWorker + * @param maxLeasesToStealAtOneTime + * @param maxLeaseRenewalThreads + * @param cleanupLeasesUponShardCompletion + * @param ignoreUnexpectedChildShards + * @param shardSyncIntervalMillis + * @param consistentReads + * @param listShardsBackoffTimeMillis + * @param maxListShardsRetryAttempts + * @param maxCacheMissesBeforeReload + * @param listShardsCacheAllowedAgeInSeconds + * @param cacheMissWarningModulus + * @param initialLeaseTableReadCapacity + * @param initialLeaseTableWriteCapacity + * @param deprecatedHierarchicalShardSyncer + * @param tableCreatorCallback + * @param dynamoDbRequestTimeout + * @param billingMode + * @param leaseTableDeletionProtectionEnabled + * @param tags + */ + @Deprecated + private DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final StreamConfig streamConfig, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + Collection tags, + LeaseSerializer leaseSerializer) { + this( + kinesisClient, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + tags, + leaseSerializer, + null, + false, + LeaseManagementConfig.DEFAULT_LEASE_CLEANUP_CONFIG); this.streamConfig = streamConfig; } @@ -421,23 +838,217 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param tableCreatorCallback * @param dynamoDbRequestTimeout * @param billingMode + * @param leaseTableDeletionProtectionEnabled * @param leaseSerializer * @param customShardDetectorProvider * @param isMultiStreamMode * @param leaseCleanupConfig */ - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final long 
failoverTimeMillis, final long epsilonMillis, - final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, LeaseSerializer leaseSerializer, - Function customShardDetectorProvider, boolean isMultiStreamMode, + @Deprecated + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + Collection tags, + LeaseSerializer leaseSerializer, + Function customShardDetectorProvider, + boolean isMultiStreamMode, + LeaseCleanupConfig leaseCleanupConfig) { + this( + kinesisClient, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + LeaseManagementConfig.DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + tags, + leaseSerializer, + customShardDetectorProvider, + isMultiStreamMode, + leaseCleanupConfig); + } + + /** + * Constructor. 
+ * @param kinesisClient + * @param dynamoDBClient + * @param tableName + * @param workerIdentifier + * @param executorService + * @param failoverTimeMillis + * @param enablePriorityLeaseAssignment + * @param epsilonMillis + * @param maxLeasesForWorker + * @param maxLeasesToStealAtOneTime + * @param maxLeaseRenewalThreads + * @param cleanupLeasesUponShardCompletion + * @param ignoreUnexpectedChildShards + * @param shardSyncIntervalMillis + * @param consistentReads + * @param listShardsBackoffTimeMillis + * @param maxListShardsRetryAttempts + * @param maxCacheMissesBeforeReload + * @param listShardsCacheAllowedAgeInSeconds + * @param cacheMissWarningModulus + * @param initialLeaseTableReadCapacity + * @param initialLeaseTableWriteCapacity + * @param deprecatedHierarchicalShardSyncer + * @param tableCreatorCallback + * @param dynamoDbRequestTimeout + * @param billingMode + * @param leaseTableDeletionProtectionEnabled + * @param leaseSerializer + * @param customShardDetectorProvider + * @param isMultiStreamMode + * @param leaseCleanupConfig + */ + @Deprecated + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final boolean enablePriorityLeaseAssignment, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + Collection tags, + LeaseSerializer leaseSerializer, + Function customShardDetectorProvider, + boolean isMultiStreamMode, + LeaseCleanupConfig leaseCleanupConfig) { + this( + kinesisClient, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + enablePriorityLeaseAssignment, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + LeaseManagementConfig.DEFAULT_LEASE_TABLE_PITR_ENABLED, + tags, + leaseSerializer, + customShardDetectorProvider, + isMultiStreamMode, + leaseCleanupConfig); + } + + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final boolean 
enablePriorityLeaseAssignment, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + final boolean leaseTablePitrEnabled, + Collection tags, + LeaseSerializer leaseSerializer, + Function customShardDetectorProvider, + boolean isMultiStreamMode, LeaseCleanupConfig leaseCleanupConfig) { this.kinesisClient = kinesisClient; this.dynamoDBClient = dynamoDBClient; @@ -445,6 +1056,7 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { this.workerIdentifier = workerIdentifier; this.executorService = executorService; this.failoverTimeMillis = failoverTimeMillis; + this.enablePriorityLeaseAssignment = enablePriorityLeaseAssignment; this.epsilonMillis = epsilonMillis; this.maxLeasesForWorker = maxLeasesForWorker; this.maxLeasesToStealAtOneTime = maxLeasesToStealAtOneTime; @@ -464,17 +1076,22 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { this.tableCreatorCallback = tableCreatorCallback; this.dynamoDbRequestTimeout = dynamoDbRequestTimeout; this.billingMode = billingMode; + this.leaseTableDeletionProtectionEnabled = leaseTableDeletionProtectionEnabled; + this.leaseTablePitrEnabled = leaseTablePitrEnabled; this.leaseSerializer = leaseSerializer; this.customShardDetectorProvider = customShardDetectorProvider; this.isMultiStreamMode = isMultiStreamMode; this.leaseCleanupConfig = leaseCleanupConfig; + this.tags = tags; } @Override public LeaseCoordinator createLeaseCoordinator(@NonNull final MetricsFactory metricsFactory) { - return new DynamoDBLeaseCoordinator(this.createLeaseRefresher(), + return new DynamoDBLeaseCoordinator( + this.createLeaseRefresher(), workerIdentifier, failoverTimeMillis, + enablePriorityLeaseAssignment, epsilonMillis, maxLeasesForWorker, maxLeasesToStealAtOneTime, @@ -484,15 +1101,18 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { metricsFactory); } - @Override @Deprecated + @Override + @Deprecated public ShardSyncTaskManager createShardSyncTaskManager(@NonNull final MetricsFactory metricsFactory) { - return new ShardSyncTaskManager(this.createShardDetector(), + return new ShardSyncTaskManager( + this.createShardDetector(), this.createLeaseRefresher(), streamConfig.initialPositionInStreamExtended(), cleanupLeasesUponShardCompletion, ignoreUnexpectedChildShards, shardSyncIntervalMillis, - executorService, deprecatedHierarchicalShardSyncer, + executorService, + deprecatedHierarchicalShardSyncer, metricsFactory); } @@ -504,29 +1124,62 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { */ @Override public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, StreamConfig streamConfig) { - return new ShardSyncTaskManager(this.createShardDetector(streamConfig), + 
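The enablePriorityLeaseAssignment flag threaded through the constructors above now reaches DynamoDBLeaseCoordinator. As a hedged sketch only, assuming LeaseManagementConfig exposes a fluent accessor matching its DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT constant (the accessor name is an assumption, not confirmed by this diff):

import software.amazon.kinesis.leases.LeaseManagementConfig;

// configsBuilder is the usual KCL bootstrap object, assumed in scope.
// The accessor below mirrors the new constructor parameter name.
LeaseManagementConfig leaseManagementConfig = configsBuilder
        .leaseManagementConfig()
        .enablePriorityLeaseAssignment(false); // the field default is true, per DynamoDBLeaseTaker further below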
return createShardSyncTaskManager(metricsFactory, streamConfig, null); + } + + /** + * Create ShardSyncTaskManager from the streamConfig passed + * + * @param metricsFactory - factory to get metrics object + * @param streamConfig - streamConfig for which ShardSyncTaskManager needs to be created + * @param deletedStreamListProvider - store for capturing the streams which are deleted in kinesis + * @return ShardSyncTaskManager + */ + @Override + public ShardSyncTaskManager createShardSyncTaskManager( + MetricsFactory metricsFactory, + StreamConfig streamConfig, + DeletedStreamListProvider deletedStreamListProvider) { + return new ShardSyncTaskManager( + this.createShardDetector(streamConfig), this.createLeaseRefresher(), streamConfig.initialPositionInStreamExtended(), cleanupLeasesUponShardCompletion, ignoreUnexpectedChildShards, shardSyncIntervalMillis, executorService, - new HierarchicalShardSyncer(isMultiStreamMode, streamConfig.streamIdentifier().toString()), + new HierarchicalShardSyncer( + isMultiStreamMode, streamConfig.streamIdentifier().toString(), deletedStreamListProvider), metricsFactory); } @Override public DynamoDBLeaseRefresher createLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName, dynamoDBClient, leaseSerializer, consistentReads, - tableCreatorCallback, dynamoDbRequestTimeout, billingMode); + return new DynamoDBLeaseRefresher( + tableName, + dynamoDBClient, + leaseSerializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + leaseTablePitrEnabled, + tags); } @Override @Deprecated public ShardDetector createShardDetector() { - return new KinesisShardDetector(kinesisClient, streamConfig.streamIdentifier(), - listShardsBackoffTimeMillis, maxListShardsRetryAttempts, listShardsCacheAllowedAgeInSeconds, - maxCacheMissesBeforeReload, cacheMissWarningModulus, dynamoDbRequestTimeout); + return new KinesisShardDetector( + kinesisClient, + streamConfig.streamIdentifier(), + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + listShardsCacheAllowedAgeInSeconds, + maxCacheMissesBeforeReload, + cacheMissWarningModulus, + dynamoDbRequestTimeout); } /** @@ -537,10 +1190,17 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { */ @Override public ShardDetector createShardDetector(StreamConfig streamConfig) { - return customShardDetectorProvider != null ? customShardDetectorProvider.apply(streamConfig) : - new KinesisShardDetector(kinesisClient, streamConfig.streamIdentifier(), listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, listShardsCacheAllowedAgeInSeconds, maxCacheMissesBeforeReload, - cacheMissWarningModulus, dynamoDbRequestTimeout); + return customShardDetectorProvider != null + ? 
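The new three-argument createShardSyncTaskManager overload (shown just below) threads a DeletedStreamListProvider into HierarchicalShardSyncer so streams found to be deleted during shard sync are recorded. A sketch of a caller, with the factory, stream config, and metrics factory assumed in scope and the provider's package import elided since it is not shown in this hunk:

import software.amazon.kinesis.leases.ShardSyncTaskManager;

// Share one provider instance so the scheduler can later inspect the
// stream identifiers it collected during shard sync.
DeletedStreamListProvider deletedStreams = new DeletedStreamListProvider();
ShardSyncTaskManager manager =
        leaseManagementFactory.createShardSyncTaskManager(metricsFactory, streamConfig, deletedStreams);
// Passing null, as the two-argument overload does, simply disables this bookkeeping.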
customShardDetectorProvider.apply(streamConfig) + : new KinesisShardDetector( + kinesisClient, + streamConfig.streamIdentifier(), + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + listShardsCacheAllowedAgeInSeconds, + maxCacheMissesBeforeReload, + cacheMissWarningModulus, + dynamoDbRequestTimeout); } /** @@ -551,9 +1211,12 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { */ @Override public LeaseCleanupManager createLeaseCleanupManager(MetricsFactory metricsFactory) { - return new LeaseCleanupManager(createLeaseCoordinator(metricsFactory), - metricsFactory, Executors.newSingleThreadScheduledExecutor(), - cleanupLeasesUponShardCompletion, leaseCleanupConfig.leaseCleanupIntervalMillis(), + return new LeaseCleanupManager( + createLeaseCoordinator(metricsFactory), + metricsFactory, + Executors.newSingleThreadScheduledExecutor(), + cleanupLeasesUponShardCompletion, + leaseCleanupConfig.leaseCleanupIntervalMillis(), leaseCleanupConfig.completedLeaseCleanupIntervalMillis(), leaseCleanupConfig.garbageLeaseCleanupIntervalMillis()); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java index acb61a38..123f4068 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java @@ -14,17 +14,19 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.collect.ImmutableMap; - import java.time.Duration; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; + +import com.google.common.collect.ImmutableMap; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; @@ -46,6 +48,8 @@ import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; import software.amazon.awssdk.services.dynamodb.model.TableStatus; +import software.amazon.awssdk.services.dynamodb.model.Tag; +import software.amazon.awssdk.services.dynamodb.model.UpdateContinuousBackupsRequest; import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.kinesis.annotations.KinesisClientInternalApi; @@ -77,6 +81,9 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { private final Duration dynamoDbRequestTimeout; private final BillingMode billingMode; + private final boolean leaseTableDeletionProtectionEnabled; + private final boolean leaseTablePitrEnabled; + private final Collection tags; private boolean newTableCreated = false; @@ -96,8 +103,11 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param consistentReads */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - 
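createLeaseCleanupManager above pulls its three intervals from leaseCleanupConfig. For orientation, a sketch of building that config, assuming LeaseCleanupConfig keeps a Lombok-style builder with millisecond-valued fields (values are illustrative, not recommendations):

import java.time.Duration;
import software.amazon.kinesis.leases.LeaseCleanupConfig; // package assumed

LeaseCleanupConfig leaseCleanupConfig = LeaseCleanupConfig.builder()
        .leaseCleanupIntervalMillis(Duration.ofMinutes(1).toMillis())           // how often the manager wakes up
        .completedLeaseCleanupIntervalMillis(Duration.ofMinutes(5).toMillis())  // leases whose shard completed
        .garbageLeaseCleanupIntervalMillis(Duration.ofMinutes(30).toMillis())   // leases whose shard no longer exists
        .build();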
final LeaseSerializer serializer, final boolean consistentReads) { + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads) { this(table, dynamoDBClient, serializer, consistentReads, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); } @@ -111,10 +121,19 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param tableCreatorCallback */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback) { - this(table, dynamoDBClient, serializer, consistentReads, tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } /** @@ -127,10 +146,22 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param dynamoDbRequestTimeout */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout) { - this(table, dynamoDBClient, serializer, consistentReads, tableCreatorCallback, dynamoDbRequestTimeout, BillingMode.PAY_PER_REQUEST); + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + BillingMode.PAY_PER_REQUEST, + LeaseManagementConfig.DEFAULT_LEASE_TABLE_DELETION_PROTECTION_ENABLED); } /** @@ -142,11 +173,88 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param tableCreatorCallback * @param dynamoDbRequestTimeout * @param billingMode + * @param leaseTableDeletionProtectionEnabled */ - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout, - final BillingMode billingMode) { + @Deprecated + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + final BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + DefaultSdkAutoConstructList.getInstance()); + } + + /** + * Constructor. 
+ * @param table + * @param dynamoDBClient + * @param serializer + * @param consistentReads + * @param tableCreatorCallback + * @param dynamoDbRequestTimeout + * @param billingMode + * @param leaseTableDeletionProtectionEnabled + * @param tags + */ + @Deprecated + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + final BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + final Collection tags) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + LeaseManagementConfig.DEFAULT_LEASE_TABLE_PITR_ENABLED, + tags); + } + + /** + * Constructor. + * @param table + * @param dynamoDBClient + * @param serializer + * @param consistentReads + * @param tableCreatorCallback + * @param dynamoDbRequestTimeout + * @param billingMode + * @param leaseTableDeletionProtectionEnabled + */ + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + final BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + final boolean leaseTablePitrEnabled, + final Collection tags) { this.table = table; this.dynamoDBClient = dynamoDBClient; this.serializer = serializer; @@ -154,6 +262,9 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { this.tableCreatorCallback = tableCreatorCallback; this.dynamoDbRequestTimeout = dynamoDbRequestTimeout; this.billingMode = billingMode; + this.leaseTableDeletionProtectionEnabled = leaseTableDeletionProtectionEnabled; + this.leaseTablePitrEnabled = leaseTablePitrEnabled; + this.tags = tags; } /** @@ -162,33 +273,51 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { @Override public boolean createLeaseTableIfNotExists(@NonNull final Long readCapacity, @NonNull final Long writeCapacity) throws ProvisionedThroughputException, DependencyException { - ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(readCapacity) - .writeCapacityUnits(writeCapacity).build(); - final CreateTableRequest request; - if(BillingMode.PAY_PER_REQUEST.equals(billingMode)){ - request = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema()) - .attributeDefinitions(serializer.getAttributeDefinitions()) - .billingMode(billingMode).build(); - } else { - request = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema()) - .attributeDefinitions(serializer.getAttributeDefinitions()).provisionedThroughput(throughput) + final CreateTableRequest.Builder builder = createTableRequestBuilder(); + if (BillingMode.PROVISIONED.equals(billingMode)) { + ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(readCapacity) + .writeCapacityUnits(writeCapacity) .build(); + builder.provisionedThroughput(throughput); } - - return createTableIfNotExists(request); + return createTableIfNotExists(builder.build()); } /** * {@inheritDoc} */ @Override - public boolean createLeaseTableIfNotExists() - throws ProvisionedThroughputException, DependencyException { - final CreateTableRequest request = 
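Two details in the constructors above are easy to miss. First, the deprecated overload defaults tags to DefaultSdkAutoConstructList.getInstance(), the SDK v2 sentinel for "not set", so no tag list is marshalled onto the CreateTable call at all. Second, once tags, deletion protection, and billing mode reach createTableRequestBuilder() later in this file, the assembled request looks roughly like the sketch below; the table name and tag are hypothetical:

import java.util.Collection;
import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList;
import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest;
import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement;
import software.amazon.awssdk.services.dynamodb.model.KeyType;
import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;
import software.amazon.awssdk.services.dynamodb.model.Tag;

Collection<Tag> unset = DefaultSdkAutoConstructList.getInstance(); // empty, and omitted when marshalled

CreateTableRequest request = CreateTableRequest.builder()
        .tableName("MyLeaseTable") // hypothetical
        .keySchema(KeySchemaElement.builder()
                .attributeName("leaseKey")
                .keyType(KeyType.HASH)
                .build())
        .attributeDefinitions(AttributeDefinition.builder()
                .attributeName("leaseKey")
                .attributeType(ScalarAttributeType.S)
                .build())
        .deletionProtectionEnabled(true)
        .tags(Tag.builder().key("team").value("stream-processing").build()) // hypothetical tag
        .billingMode(BillingMode.PAY_PER_REQUEST) // the PROVISIONED path sets provisionedThroughput instead
        .build();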
CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema()) - .attributeDefinitions(serializer.getAttributeDefinitions()) - .billingMode(billingMode).build(); + public boolean createLeaseTableIfNotExists() throws ProvisionedThroughputException, DependencyException { + final CreateTableRequest request = createTableRequestBuilder().build(); - return createTableIfNotExists(request); + boolean tableExists = createTableIfNotExists(request); + + if (leaseTablePitrEnabled) { + enablePitr(); + log.info("Enabled PITR on table {}", table); + } + + return tableExists; + } + + private void enablePitr() throws DependencyException { + final UpdateContinuousBackupsRequest request = UpdateContinuousBackupsRequest.builder() + .tableName(table) + .pointInTimeRecoverySpecification(builder -> builder.pointInTimeRecoveryEnabled(true)) + .build(); + + final AWSExceptionManager exceptionManager = createExceptionManager(); + exceptionManager.add(ResourceNotFoundException.class, t -> t); + exceptionManager.add(ProvisionedThroughputExceededException.class, t -> t); + + try { + FutureUtils.resolveOrCancelFuture(dynamoDBClient.updateContinuousBackups(request), dynamoDbRequestTimeout); + } catch (ExecutionException e) { + throw exceptionManager.apply(e.getCause()); + } catch (InterruptedException | DynamoDbException | TimeoutException e) { + throw new DependencyException(e); + } } private boolean createTableIfNotExists(CreateTableRequest request) @@ -238,7 +367,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { } private TableStatus tableStatus() throws DependencyException { - DescribeTableRequest request = DescribeTableRequest.builder().tableName(table).build(); + DescribeTableRequest request = + DescribeTableRequest.builder().tableName(table).build(); final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ResourceNotFoundException.class, t -> t); @@ -246,7 +376,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { DescribeTableResponse result; try { try { - result = FutureUtils.resolveOrCancelFuture(dynamoDBClient.describeTable(request), dynamoDbRequestTimeout); + result = FutureUtils.resolveOrCancelFuture( + dynamoDBClient.describeTable(request), dynamoDbRequestTimeout); } catch (ExecutionException e) { throw exceptionManager.apply(e.getCause()); } catch (InterruptedException e) { @@ -311,9 +442,9 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * {@inheritDoc} */ @Override - public List listLeasesForStream(StreamIdentifier streamIdentifier) throws DependencyException, - InvalidStateException, ProvisionedThroughputException { - return list( null, streamIdentifier); + public List listLeasesForStream(StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + return list(null, streamIdentifier); } /** @@ -359,8 +490,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @throws DependencyException if DynamoDB scan fail in an unexpected way * @throws ProvisionedThroughputException if DynamoDB scan fail due to exceeded capacity */ - private List list(Integer limit, Integer maxPages, StreamIdentifier streamIdentifier) throws DependencyException, InvalidStateException, - ProvisionedThroughputException { + private List list(Integer limit, Integer maxPages, StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Listing leases from table {}", table); @@ -368,9 
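In this diff, enablePitr() is invoked only from the parameterless createLeaseTableIfNotExists(), after the create-if-not-exists call has run. The same operation stripped of the KCL's exception translation and future-timeout handling, as a standalone sketch (table name hypothetical):

import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.UpdateContinuousBackupsRequest;

DynamoDbAsyncClient dynamo = DynamoDbAsyncClient.create();
dynamo.updateContinuousBackups(UpdateContinuousBackupsRequest.builder()
                .tableName("MyLeaseTable")
                .pointInTimeRecoverySpecification(b -> b.pointInTimeRecoveryEnabled(true))
                .build())
        .join(); // the KCL resolves this future against dynamoDbRequestTimeout instead of blocking indefinitely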
+499,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { if (streamIdentifier != null) { final Map expressionAttributeValues = ImmutableMap.of( - DDB_STREAM_NAME, AttributeValue.builder().s(streamIdentifier.serialize()).build() - ); - scanRequestBuilder = scanRequestBuilder.filterExpression(STREAM_NAME + " = " + DDB_STREAM_NAME) + DDB_STREAM_NAME, + AttributeValue.builder().s(streamIdentifier.serialize()).build()); + scanRequestBuilder = scanRequestBuilder + .filterExpression(STREAM_NAME + " = " + DDB_STREAM_NAME) .expressionAttributeValues(expressionAttributeValues); } @@ -380,12 +512,13 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { ScanRequest scanRequest = scanRequestBuilder.build(); final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ResourceNotFoundException.class, t -> t); + exceptionManager.add(ResourceNotFoundException.class, t -> t); exceptionManager.add(ProvisionedThroughputExceededException.class, t -> t); try { try { - ScanResponse scanResult = FutureUtils.resolveOrCancelFuture(dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); + ScanResponse scanResult = + FutureUtils.resolveOrCancelFuture(dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); List result = new ArrayList<>(); while (scanResult != null) { @@ -401,9 +534,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { log.debug("lastEvaluatedKey was null - scan finished."); } else { // Make another request, picking up where we left off. - scanRequest = scanRequest.toBuilder().exclusiveStartKey(lastEvaluatedKey).build(); + scanRequest = scanRequest.toBuilder() + .exclusiveStartKey(lastEvaluatedKey) + .build(); log.debug("lastEvaluatedKey was {}, continuing scan.", lastEvaluatedKey); - scanResult = FutureUtils.resolveOrCancelFuture(dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); + scanResult = FutureUtils.resolveOrCancelFuture( + dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); } } log.debug("Listed {} leases from table {}", result.size(), table); @@ -431,8 +567,11 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Creating lease: {}", lease); - PutItemRequest request = PutItemRequest.builder().tableName(table).item(serializer.toDynamoRecord(lease)) - .expected(serializer.getDynamoNonexistantExpectation()).build(); + PutItemRequest request = PutItemRequest.builder() + .tableName(table) + .item(serializer.toDynamoRecord(lease)) + .expected(serializer.getDynamoNonexistantExpectation()) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ConditionalCheckFailedException.class, t -> t); @@ -452,7 +591,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("create", lease.leaseKey(), e); } - log.info("Created lease: {}",lease); + log.info("Created lease: {}", lease); return true; } @@ -464,12 +603,16 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Getting lease with key {}", leaseKey); - GetItemRequest request = GetItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(leaseKey)) - .consistentRead(consistentReads).build(); + GetItemRequest request = GetItemRequest.builder() + .tableName(table) + 
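The reformatted list(...) above keeps the standard DynamoDB scan-pagination loop: re-issue the scan with exclusiveStartKey until lastEvaluatedKey comes back empty. The bare idiom, for reference (table name hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;

DynamoDbAsyncClient dynamo = DynamoDbAsyncClient.create();
List<Map<String, AttributeValue>> items = new ArrayList<>();
ScanRequest request = ScanRequest.builder().tableName("MyLeaseTable").build();
while (true) {
    ScanResponse response = dynamo.scan(request).join();
    items.addAll(response.items());
    Map<String, AttributeValue> lastKey = response.lastEvaluatedKey();
    if (lastKey == null || lastKey.isEmpty()) {
        break; // mirrors the "lastEvaluatedKey was null - scan finished" branch above
    }
    request = request.toBuilder().exclusiveStartKey(lastKey).build(); // pick up where we left off
}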
.key(serializer.getDynamoHashKey(leaseKey)) + .consistentRead(consistentReads) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); try { try { - GetItemResponse result = FutureUtils.resolveOrCancelFuture(dynamoDBClient.getItem(request), dynamoDbRequestTimeout); + GetItemResponse result = + FutureUtils.resolveOrCancelFuture(dynamoDBClient.getItem(request), dynamoDbRequestTimeout); Map dynamoRecord = result.item(); if (CollectionUtils.isNullOrEmpty(dynamoRecord)) { @@ -499,12 +642,15 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Renewing lease with key {}", lease.leaseKey()); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) .expected(serializer.getDynamoLeaseCounterExpectation(lease)) - .attributeUpdates(serializer.getDynamoLeaseCounterUpdate(lease)).build(); + .attributeUpdates(serializer.getDynamoLeaseCounterUpdate(lease)) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); + exceptionManager.add(ConditionalCheckFailedException.class, t -> t); try { try { @@ -516,8 +662,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); + log.debug( + "Lease renewal failed for lease with key {} because the lease counter was not {}", + lease.leaseKey(), + lease.leaseCounter()); // If we had a spurious retry during the Dynamo update, then this conditional PUT failure // might be incorrect. So, we get the item straight away and check if the lease owner + lease @@ -525,7 +673,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { String expectedOwner = lease.leaseOwner(); Long expectedCounter = lease.leaseCounter() + 1; final Lease updatedLease = getLease(lease.leaseKey()); - if (updatedLease == null || !expectedOwner.equals(updatedLease.leaseOwner()) + if (updatedLease == null + || !expectedOwner.equals(updatedLease.leaseOwner()) || !expectedCounter.equals(updatedLease.leaseCounter())) { return false; } @@ -548,8 +697,11 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { final String oldOwner = lease.leaseOwner(); - log.debug("Taking lease with leaseKey {} from {} to {}", lease.leaseKey(), - lease.leaseOwner() == null ? "nobody" : lease.leaseOwner(), owner); + log.debug( + "Taking lease with leaseKey {} from {} to {}", + lease.leaseKey(), + lease.leaseOwner() == null ? 
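renewLease above is an optimistic compare-and-swap: the UpdateItem succeeds only while leaseCounter still holds the value this worker last read, and ConditionalCheckFailedException means another worker moved it first. A sketch of the essentials, with attribute names written out literally and a hypothetical lease key (both are illustrative stand-ins for what the serializer produces):

import java.util.Map;
import java.util.concurrent.CompletionException;
import software.amazon.awssdk.services.dynamodb.model.AttributeAction;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate;
import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException;
import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest;

UpdateItemRequest renew = UpdateItemRequest.builder()
        .tableName("MyLeaseTable")
        .key(Map.of("leaseKey", AttributeValue.builder().s("shardId-000000000000").build()))
        .expected(Map.of("leaseCounter", ExpectedAttributeValue.builder()
                .value(AttributeValue.builder().n("41").build()) // the counter we last observed
                .build()))
        .attributeUpdates(Map.of("leaseCounter", AttributeValueUpdate.builder()
                .value(AttributeValue.builder().n("42").build()) // observed value + 1
                .action(AttributeAction.PUT)
                .build()))
        .build();
try {
    dynamo.updateItem(renew).join(); // dynamo: a DynamoDbAsyncClient, as in the sketches above
} catch (CompletionException e) {
    if (e.getCause() instanceof ConditionalCheckFailedException) {
        // another worker renewed or took the lease; renewLease re-reads before declaring it lost
    }
}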
"nobody" : lease.leaseOwner(), + owner); final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ConditionalCheckFailedException.class, t -> t); @@ -557,8 +709,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { Map updates = serializer.getDynamoLeaseCounterUpdate(lease); updates.putAll(serializer.getDynamoTakeLeaseUpdate(lease, owner)); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)).attributeUpdates(updates).build(); + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .expected(serializer.getDynamoLeaseCounterExpectation(lease)) + .attributeUpdates(updates) + .build(); try { try { @@ -570,8 +726,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); + log.debug( + "Lease renewal failed for lease with key {} because the lease counter was not {}", + lease.leaseKey(), + lease.leaseCounter()); return false; } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("take", lease.leaseKey(), e); @@ -602,8 +760,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { Map updates = serializer.getDynamoLeaseCounterUpdate(lease); updates.putAll(serializer.getDynamoEvictLeaseUpdate(lease)); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseOwnerExpectation(lease)).attributeUpdates(updates).build(); + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .expected(serializer.getDynamoLeaseOwnerExpectation(lease)) + .attributeUpdates(updates) + .build(); try { try { @@ -615,8 +777,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease eviction failed for lease with key {} because the lease owner was not {}", - lease.leaseKey(), lease.leaseOwner()); + log.debug( + "Lease eviction failed for lease with key {} because the lease owner was not {}", + lease.leaseKey(), + lease.leaseOwner()); return false; } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("evict", lease.leaseKey(), e); @@ -639,8 +803,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { final AWSExceptionManager exceptionManager = createExceptionManager(); for (final Lease lease : allLeases) { - DeleteItemRequest deleteRequest = DeleteItemRequest.builder().tableName(table) - .key(serializer.getDynamoHashKey(lease)).build(); + DeleteItemRequest deleteRequest = DeleteItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .build(); try { try { @@ -666,8 +832,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Deleting lease with leaseKey {}", lease.leaseKey()); - DeleteItemRequest deleteRequest = DeleteItemRequest.builder().tableName(table) - .key(serializer.getDynamoHashKey(lease)).build(); + DeleteItemRequest deleteRequest = 
DeleteItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); try { @@ -700,8 +868,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { Map updates = serializer.getDynamoLeaseCounterUpdate(lease); updates.putAll(serializer.getDynamoUpdateLeaseUpdate(lease)); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)).attributeUpdates(updates).build(); + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .expected(serializer.getDynamoLeaseCounterExpectation(lease)) + .attributeUpdates(updates) + .build(); try { try { @@ -712,8 +884,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease update failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); + log.debug( + "Lease update failed for lease with key {} because the lease counter was not {}", + lease.leaseKey(), + lease.leaseCounter()); return false; } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("update", lease.leaseKey(), e); @@ -731,9 +905,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ConditionalCheckFailedException.class, t -> t); Map updates = serializer.getDynamoUpdateLeaseUpdate(lease, updateField); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) .expected(serializer.getDynamoExistentExpectation(lease.leaseKey())) - .attributeUpdates(updates).build(); + .attributeUpdates(updates) + .build(); try { try { FutureUtils.resolveOrCancelFuture(dynamoDBClient.updateItem(request), dynamoDbRequestTimeout); @@ -743,8 +920,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.warn("Lease update failed for lease with key {} because the lease did not exist at the time of the update", - lease.leaseKey(), e); + log.warn( + "Lease update failed for lease with key {} because the lease did not exist at the time of the update", + lease.leaseKey(), + e); } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("update", lease.leaseKey(), e); } @@ -779,14 +958,27 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new ProvisionedThroughputException(e); } else if (e instanceof ResourceNotFoundException) { throw new InvalidStateException( - String.format("Cannot %s lease with key %s because table %s does not exist.", - operation, leaseKey, table), + String.format( + "Cannot %s lease with key %s because table %s does not exist.", operation, leaseKey, table), e); } else { return new DependencyException(e); } } + private CreateTableRequest.Builder createTableRequestBuilder() { + final CreateTableRequest.Builder builder = CreateTableRequest.builder() + .tableName(table) + .keySchema(serializer.getKeySchema()) + .attributeDefinitions(serializer.getAttributeDefinitions()) + 
.deletionProtectionEnabled(leaseTableDeletionProtectionEnabled) + .tags(tags); + if (BillingMode.PAY_PER_REQUEST.equals(billingMode)) { + builder.billingMode(billingMode); + } + return builder; + } + private AWSExceptionManager createExceptionManager() { final AWSExceptionManager exceptionManager = new AWSExceptionManager(); exceptionManager.add(DynamoDbException.class, t -> t); @@ -794,7 +986,9 @@ } void performPostTableCreationAction() { - tableCreatorCallback.performAction( - TableCreatorCallbackInput.builder().dynamoDbClient(dynamoDBClient).tableName(table).build()); + tableCreatorCallback.performAction(TableCreatorCallbackInput.builder() + .dynamoDbClient(dynamoDBClient) + .tableName(table) + .build()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java index ab2d38c5..a53ec4ab 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java @@ -29,11 +29,10 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang3.StringUtils; - import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.StreamIdentifier; @@ -78,8 +77,11 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { * @param executorService * ExecutorService to use for renewing leases in parallel */ - public DynamoDBLeaseRenewer(final LeaseRefresher leaseRefresher, final String workerIdentifier, - final long leaseDurationMillis, final ExecutorService executorService, + public DynamoDBLeaseRenewer( + final LeaseRefresher leaseRefresher, + final String workerIdentifier, + final long leaseDurationMillis, + final ExecutorService executorService, final MetricsFactory metricsFactory) { this.leaseRefresher = leaseRefresher; this.workerIdentifier = workerIdentifier; @@ -108,10 +110,10 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { boolean success = false; try { - /* - * We iterate in descending order here so that the synchronized(lease) inside renewLease doesn't "lead" calls - * to getCurrentlyHeldLeases. They'll still cross paths, but they won't interleave their executions. - */ + /* + * We iterate in descending order here so that the synchronized(lease) inside renewLease doesn't "lead" calls + * to getCurrentlyHeldLeases. They'll still cross paths, but they won't interleave their executions. + */ int lostLeases = 0; List<Future<Boolean>> renewLeaseTasks = new ArrayList<>(); for (Lease lease : ownedLeases.descendingMap().values()) { @@ -139,8 +141,10 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { scope.addData("CurrentLeases", ownedLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY); if (leasesInUnknownState > 0) { throw new DependencyException( - String.format("Encountered an exception while renewing leases.
The number" + + " of leases which might not have been renewed is %d", + leasesInUnknownState), lastException); } success = true; @@ -165,7 +169,8 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { return renewLease(lease, false); } - private boolean renewLease(Lease lease, boolean renewEvenIfExpired) throws DependencyException, InvalidStateException { + private boolean renewLease(Lease lease, boolean renewEvenIfExpired) + throws DependencyException, InvalidStateException { String leaseKey = lease.leaseKey(); final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, RENEW_ALL_LEASES_DIMENSION); @@ -201,8 +206,12 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { success = true; break; } catch (ProvisionedThroughputException e) { - log.info("Worker {} could not renew lease with key {} on try {} out of {} due to capacity", - workerIdentifier, leaseKey, i, RENEWAL_RETRIES); + log.info( + "Worker {} could not renew lease with key {} on try {} out of {} due to capacity", + workerIdentifier, + leaseKey, + i, + RENEWAL_RETRIES); } } } finally { @@ -258,8 +267,8 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { } if (copy.isExpired(leaseDurationNanos, now)) { - log.info("getCurrentlyHeldLease not returning lease with key {} because it is expired", - copy.leaseKey()); + log.info( + "getCurrentlyHeldLease not returning lease with key {} because it is expired", copy.leaseKey()); return null; } else { return copy; @@ -271,8 +280,9 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { * {@inheritDoc} */ @Override - public boolean updateLease(Lease lease, UUID concurrencyToken, @NonNull String operation, String singleStreamShardId) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public boolean updateLease( + Lease lease, UUID concurrencyToken, @NonNull String operation, String singleStreamShardId) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { verifyNotNull(lease, "lease cannot be null"); verifyNotNull(lease.leaseKey(), "leaseKey cannot be null"); verifyNotNull(concurrencyToken, "concurrencyToken cannot be null"); @@ -281,7 +291,9 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { Lease authoritativeLease = ownedLeases.get(leaseKey); if (authoritativeLease == null) { - log.info("Worker {} could not update lease with key {} because it does not hold it", workerIdentifier, + log.info( + "Worker {} could not update lease with key {} because it does not hold it", + workerIdentifier, leaseKey); return false; } @@ -292,15 +304,17 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { * called update. 
*/ if (!authoritativeLease.concurrencyToken().equals(concurrencyToken)) { - log.info("Worker {} refusing to update lease with key {} because concurrency tokens don't match", - workerIdentifier, leaseKey); + log.info( + "Worker {} refusing to update lease with key {} because concurrency tokens don't match", + workerIdentifier, + leaseKey); return false; } final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, operation); if (lease instanceof MultiStreamLease) { - MetricsUtil.addStreamId(scope, - StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier())); + MetricsUtil.addStreamId( + scope, StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier())); MetricsUtil.addShardId(scope, ((MultiStreamLease) lease).shardId()); } else if (StringUtils.isNotEmpty(singleStreamShardId)) { MetricsUtil.addShardId(scope, singleStreamShardId); @@ -308,6 +322,7 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { long startTime = System.currentTimeMillis(); boolean success = false; + Lease authoritativeLeaseCopy = authoritativeLease.copy(); try { log.info("Updating lease from {} to {}", authoritativeLease, lease); synchronized (authoritativeLease) { @@ -344,6 +359,10 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { success = true; return updatedLease; } + } catch (ProvisionedThroughputException | InvalidStateException | DependencyException e) { + // On failure in updating DDB, revert changes to in memory lease + authoritativeLease.update(authoritativeLeaseCopy); + throw e; } finally { MetricsUtil.addSuccessAndLatency(scope, "UpdateLease", success, startTime, MetricsLevel.DETAILED); MetricsUtil.endScope(scope); @@ -359,7 +378,8 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { for (Lease lease : newLeases) { if (lease.lastCounterIncrementNanos() == null) { - log.info("addLeasesToRenew ignoring lease with key {} because it does not have lastRenewalNanos set", + log.info( + "addLeasesToRenew ignoring lease with key {} because it does not have lastRenewalNanos set", lease.leaseKey()); continue; } @@ -424,5 +444,4 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { throw new IllegalArgumentException(message); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java index 64a7840c..c10cf475 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java @@ -20,7 +20,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import com.google.common.base.Strings; import software.amazon.awssdk.services.dynamodb.model.AttributeAction; import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; @@ -71,8 +70,12 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { } result.put(OWNER_SWITCHES_KEY, DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint())); - result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber())); - result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); + result.put( + CHECKPOINT_SEQUENCE_NUMBER_KEY, + 
DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber())); + result.put( + CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, + DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); if (lease.parentShardIds() != null && !lease.parentShardIds().isEmpty()) { result.put(PARENT_SHARD_ID_KEY, DynamoUtils.createAttributeValue(lease.parentShardIds())); } @@ -80,18 +83,31 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { result.put(CHILD_SHARD_IDS_KEY, DynamoUtils.createAttributeValue(lease.childShardIds())); } - if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber())); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber())); + if (lease.pendingCheckpoint() != null + && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { + result.put( + PENDING_CHECKPOINT_SEQUENCE_KEY, + DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber())); + result.put( + PENDING_CHECKPOINT_SUBSEQUENCE_KEY, + DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber())); } if (lease.pendingCheckpointState() != null) { - result.put(PENDING_CHECKPOINT_STATE_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); + result.put( + PENDING_CHECKPOINT_STATE_KEY, + DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); } - if(lease.hashKeyRangeForLease() != null) { - result.put(STARTING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey())); - result.put(ENDING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey())); + if (lease.hashKeyRangeForLease() != null) { + result.put( + STARTING_HASH_KEY, + DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedStartingHashKey())); + result.put( + ENDING_HASH_KEY, + DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedEndingHashKey())); } return result; @@ -110,20 +126,16 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { leaseToUpdate.leaseCounter(DynamoUtils.safeGetLong(dynamoRecord, LEASE_COUNTER_KEY)); leaseToUpdate.ownerSwitchesSinceCheckpoint(DynamoUtils.safeGetLong(dynamoRecord, OWNER_SWITCHES_KEY)); - leaseToUpdate.checkpoint( - new ExtendedSequenceNumber( - DynamoUtils.safeGetString(dynamoRecord, CHECKPOINT_SEQUENCE_NUMBER_KEY), - DynamoUtils.safeGetLong(dynamoRecord, CHECKPOINT_SUBSEQUENCE_NUMBER_KEY)) - ); + leaseToUpdate.checkpoint(new ExtendedSequenceNumber( + DynamoUtils.safeGetString(dynamoRecord, CHECKPOINT_SEQUENCE_NUMBER_KEY), + DynamoUtils.safeGetLong(dynamoRecord, CHECKPOINT_SUBSEQUENCE_NUMBER_KEY))); leaseToUpdate.parentShardIds(DynamoUtils.safeGetSS(dynamoRecord, PARENT_SHARD_ID_KEY)); leaseToUpdate.childShardIds(DynamoUtils.safeGetSS(dynamoRecord, CHILD_SHARD_IDS_KEY)); if (!Strings.isNullOrEmpty(DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY))) { - leaseToUpdate.pendingCheckpoint( - new ExtendedSequenceNumber( - DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY), - DynamoUtils.safeGetLong(dynamoRecord, PENDING_CHECKPOINT_SUBSEQUENCE_KEY)) - ); + leaseToUpdate.pendingCheckpoint(new ExtendedSequenceNumber( + DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY), + 
DynamoUtils.safeGetLong(dynamoRecord, PENDING_CHECKPOINT_SUBSEQUENCE_KEY))); } leaseToUpdate.pendingCheckpointState(DynamoUtils.safeGetByteArray(dynamoRecord, PENDING_CHECKPOINT_STATE_KEY)); @@ -159,7 +171,9 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoLeaseCounterExpectation(final Long leaseCounter) { Map result = new HashMap<>(); - ExpectedAttributeValue eav = ExpectedAttributeValue.builder().value(DynamoUtils.createAttributeValue(leaseCounter)).build(); + ExpectedAttributeValue eav = ExpectedAttributeValue.builder() + .value(DynamoUtils.createAttributeValue(leaseCounter)) + .build(); result.put(LEASE_COUNTER_KEY, eav); return result; @@ -170,13 +184,13 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { Map result = new HashMap<>(); ExpectedAttributeValue.Builder eavBuilder = ExpectedAttributeValue.builder(); - + if (lease.leaseOwner() == null) { eavBuilder = eavBuilder.exists(false); } else { eavBuilder = eavBuilder.value(DynamoUtils.createAttributeValue(lease.leaseOwner())); } - + result.put(LEASE_OWNER_KEY, eavBuilder.build()); return result; @@ -186,7 +200,8 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoNonexistantExpectation() { Map result = new HashMap<>(); - ExpectedAttributeValue expectedAV = ExpectedAttributeValue.builder().exists(false).build(); + ExpectedAttributeValue expectedAV = + ExpectedAttributeValue.builder().exists(false).build(); result.put(LEASE_KEY_KEY, expectedAV); return result; @@ -213,8 +228,10 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoLeaseCounterUpdate(Long leaseCounter) { Map result = new HashMap<>(); - AttributeValueUpdate avu = - AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(leaseCounter + 1)).action(AttributeAction.PUT).build(); + AttributeValueUpdate avu = AttributeValueUpdate.builder() + .value(DynamoUtils.createAttributeValue(leaseCounter + 1)) + .action(AttributeAction.PUT) + .build(); result.put(LEASE_COUNTER_KEY, avu); return result; @@ -224,11 +241,21 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoTakeLeaseUpdate(final Lease lease, String owner) { Map result = new HashMap<>(); - result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(owner)).action(AttributeAction.PUT).build()); + result.put( + LEASE_OWNER_KEY, + AttributeValueUpdate.builder() + .value(DynamoUtils.createAttributeValue(owner)) + .action(AttributeAction.PUT) + .build()); String oldOwner = lease.leaseOwner(); if (oldOwner != null && !oldOwner.equals(owner)) { - result.put(OWNER_SWITCHES_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(1L)).action(AttributeAction.ADD).build()); + result.put( + OWNER_SWITCHES_KEY, + AttributeValueUpdate.builder() + .value(DynamoUtils.createAttributeValue(1L)) + .action(AttributeAction.ADD) + .build()); } return result; @@ -239,67 +266,109 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { Map result = new HashMap<>(); AttributeValue value = null; - result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(value).action(AttributeAction.DELETE).build()); + result.put( + LEASE_OWNER_KEY, + AttributeValueUpdate.builder() + .value(value) + .action(AttributeAction.DELETE) + .build()); return result; } protected AttributeValueUpdate putUpdate(AttributeValue attributeValue) { - return 
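getDynamoTakeLeaseUpdate above mixes two update actions: PUT overwrites leaseOwner outright, while ADD has DynamoDB increment the owner-switch counter server-side with no read required. In isolation, using what I take to be the serializer's literal key names and a hypothetical new owner:

import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.services.dynamodb.model.AttributeAction;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate;

Map<String, AttributeValueUpdate> updates = new HashMap<>();
updates.put("leaseOwner", AttributeValueUpdate.builder()
        .value(AttributeValue.builder().s("worker-2").build())
        .action(AttributeAction.PUT) // replace the owner
        .build());
updates.put("ownerSwitchesSinceCheckpoint", AttributeValueUpdate.builder()
        .value(AttributeValue.builder().n("1").build())
        .action(AttributeAction.ADD) // atomic server-side increment
        .build());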
AttributeValueUpdate.builder().value(attributeValue).action(AttributeAction.PUT).build(); + return AttributeValueUpdate.builder() + .value(attributeValue) + .action(AttributeAction.PUT) + .build(); } @Override public Map getDynamoUpdateLeaseUpdate(final Lease lease) { Map result = new HashMap<>(); - result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber()))); - result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()))); - result.put(OWNER_SWITCHES_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint()))); + result.put( + CHECKPOINT_SEQUENCE_NUMBER_KEY, + putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber()))); + result.put( + CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, + putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()))); + result.put( + OWNER_SWITCHES_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint()))); - if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber()))); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber()))); + if (lease.pendingCheckpoint() != null + && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { + result.put( + PENDING_CHECKPOINT_SEQUENCE_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.pendingCheckpoint().sequenceNumber()))); + result.put( + PENDING_CHECKPOINT_SUBSEQUENCE_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.pendingCheckpoint().subSequenceNumber()))); } else { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); + result.put( + PENDING_CHECKPOINT_SEQUENCE_KEY, + AttributeValueUpdate.builder() + .action(AttributeAction.DELETE) + .build()); + result.put( + PENDING_CHECKPOINT_SUBSEQUENCE_KEY, + AttributeValueUpdate.builder() + .action(AttributeAction.DELETE) + .build()); } if (lease.pendingCheckpointState() != null) { - result.put(PENDING_CHECKPOINT_STATE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpointState()))); + result.put( + PENDING_CHECKPOINT_STATE_KEY, + putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpointState()))); } else { - result.put(PENDING_CHECKPOINT_STATE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); + result.put( + PENDING_CHECKPOINT_STATE_KEY, + AttributeValueUpdate.builder() + .action(AttributeAction.DELETE) + .build()); } - if (!CollectionUtils.isNullOrEmpty(lease.childShardIds())) { result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds()))); } - if(lease.hashKeyRangeForLease() != null) { - result.put(STARTING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey()))); - result.put(ENDING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey()))); + if (lease.hashKeyRangeForLease() != null) { + result.put( + STARTING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + 
lease.hashKeyRangeForLease().serializedStartingHashKey()))); + result.put( + ENDING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedEndingHashKey()))); } return result; } @Override - public Map getDynamoUpdateLeaseUpdate(Lease lease, - UpdateField updateField) { + public Map getDynamoUpdateLeaseUpdate(Lease lease, UpdateField updateField) { Map result = new HashMap<>(); switch (updateField) { - case CHILD_SHARDS: - if (!CollectionUtils.isNullOrEmpty(lease.childShardIds())) { - result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds()))); - } - break; - case HASH_KEY_RANGE: - if (lease.hashKeyRangeForLease() != null) { - result.put(STARTING_HASH_KEY, putUpdate( - DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey()))); - result.put(ENDING_HASH_KEY, putUpdate( - DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey()))); - } - break; + case CHILD_SHARDS: + if (!CollectionUtils.isNullOrEmpty(lease.childShardIds())) { + result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds()))); + } + break; + case HASH_KEY_RANGE: + if (lease.hashKeyRangeForLease() != null) { + result.put( + STARTING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedStartingHashKey()))); + result.put( + ENDING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedEndingHashKey()))); + } + break; } return result; } @@ -307,7 +376,10 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { @Override public Collection getKeySchema() { List keySchema = new ArrayList<>(); - keySchema.add(KeySchemaElement.builder().attributeName(LEASE_KEY_KEY).keyType(KeyType.HASH).build()); + keySchema.add(KeySchemaElement.builder() + .attributeName(LEASE_KEY_KEY) + .keyType(KeyType.HASH) + .build()); return keySchema; } @@ -315,8 +387,10 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { @Override public Collection getAttributeDefinitions() { List definitions = new ArrayList<>(); - definitions.add(AttributeDefinition.builder().attributeName(LEASE_KEY_KEY) - .attributeType(ScalarAttributeType.S).build()); + definitions.add(AttributeDefinition.builder() + .attributeName(LEASE_KEY_KEY) + .attributeType(ScalarAttributeType.S) + .build()); return definitions; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java index 9fb91f14..7e494204 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.annotations.VisibleForTesting; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -28,6 +26,8 @@ import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; + +import com.google.common.annotations.VisibleForTesting; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.kinesis.annotations.KinesisClientInternalApi; @@ -69,11 +69,14 @@ public class 
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java
index 9fb91f14..7e494204 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java
@@ -14,8 +14,6 @@
  */
 package software.amazon.kinesis.leases.dynamodb;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -28,6 +26,8 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
 import lombok.extern.slf4j.Slf4j;
 import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
 import software.amazon.kinesis.annotations.KinesisClientInternalApi;
@@ -69,11 +69,14 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
     // TODO: Remove these defaults and use the defaults in the config
     private int maxLeasesForWorker = Integer.MAX_VALUE;
     private int maxLeasesToStealAtOneTime = 1;
-
-    private long veryOldLeaseDurationNanosMultiplier = 3;
+    private boolean enablePriorityLeaseAssignment = true;
+    private int veryOldLeaseDurationNanosMultiplier = 3;
     private long lastScanTimeNanos = 0L;
 
-    public DynamoDBLeaseTaker(LeaseRefresher leaseRefresher, String workerIdentifier, long leaseDurationMillis,
+    public DynamoDBLeaseTaker(
+            LeaseRefresher leaseRefresher,
+            String workerIdentifier,
+            long leaseDurationMillis,
             final MetricsFactory metricsFactory) {
         this.leaseRefresher = leaseRefresher;
         this.workerIdentifier = workerIdentifier;
@@ -103,15 +106,29 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
         return this;
     }
 
+    /**
+     * @deprecated Misspelled method, use {@link DynamoDBLeaseTaker#withVeryOldLeaseDurationNanosMultiplier(int)}
+     */
+    @Deprecated
+    public DynamoDBLeaseTaker withVeryOldLeaseDurationNanosMultipler(long veryOldLeaseDurationNanosMultipler) {
+        this.veryOldLeaseDurationNanosMultiplier = (int) veryOldLeaseDurationNanosMultipler;
+        return this;
+    }
+
     /**
      * Overrides the default very old lease duration nanos multiplier to increase the threshold for taking very old leases.
      * Setting this to a higher value than 3 will increase the threshold for very old lease taking.
      *
-     * @param veryOldLeaseDurationNanosMultipler Very old lease duration multiplier for adjusting very old lease taking.
+     * @param veryOldLeaseDurationNanosMultiplier Very old lease duration multiplier for adjusting very old lease taking.
      * @return LeaseTaker
      */
-    public DynamoDBLeaseTaker withVeryOldLeaseDurationNanosMultipler(long veryOldLeaseDurationNanosMultipler) {
-        this.veryOldLeaseDurationNanosMultiplier = veryOldLeaseDurationNanosMultipler;
+    public DynamoDBLeaseTaker withVeryOldLeaseDurationNanosMultiplier(int veryOldLeaseDurationNanosMultiplier) {
+        this.veryOldLeaseDurationNanosMultiplier = veryOldLeaseDurationNanosMultiplier;
+        return this;
+    }
+
+    public DynamoDBLeaseTaker withEnablePriorityLeaseAssignment(boolean enablePriorityLeaseAssignment) {
+        this.enablePriorityLeaseAssignment = enablePriorityLeaseAssignment;
         return this;
     }
 
@@ -153,7 +170,7 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
      * @throws InvalidStateException
      */
     synchronized Map<String, Lease> takeLeases(Callable<Long> timeProvider)
-        throws DependencyException, InvalidStateException {
+            throws DependencyException, InvalidStateException {
         // Key is leaseKey
         Map<String, Lease> takenLeases = new HashMap<>();
 
@@ -172,7 +189,10 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
                 updateAllLeases(timeProvider);
                 success = true;
             } catch (ProvisionedThroughputException e) {
-                log.info("Worker {} could not find expired leases on try {} out of {}", workerIdentifier, i,
+                log.info(
+                        "Worker {} could not find available leases on try {} out of {}",
+                        workerIdentifier,
+                        i,
                         TAKE_RETRIES);
                 lastException = e;
             }
@@ -184,14 +204,17 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
         }
 
         if (lastException != null) {
-            log.error("Worker {} could not scan leases table, aborting TAKE_LEASES_DIMENSION. Exception caught by"
-                    + " last retry:", workerIdentifier, lastException);
+            log.error(
+                    "Worker {} could not scan leases table, aborting TAKE_LEASES_DIMENSION. Exception caught by"
+                            + " last retry:",
+                    workerIdentifier,
+                    lastException);
             return takenLeases;
         }
 
-        List<Lease> expiredLeases = getExpiredLeases();
+        List<Lease> availableLeases = getAvailableLeases();
 
-        Set<Lease> leasesToTake = computeLeasesToTake(expiredLeases);
+        Set<Lease> leasesToTake = computeLeasesToTake(availableLeases, timeProvider);
         leasesToTake = updateStaleLeasesWithLatestState(updateAllLeasesTotalTimeMillis, leasesToTake);
 
         Set<String> untakenLeaseKeys = new HashSet<>();
@@ -214,8 +237,13 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
                         success = true;
                         break;
                     } catch (ProvisionedThroughputException e) {
-                        log.info("Could not take lease with key {} for worker {} on try {} out of {} due to"
-                                + " capacity", leaseKey, workerIdentifier, i, TAKE_RETRIES);
+                        log.info(
+                                "Could not take lease with key {} for worker {} on try {} out of {} due to"
+                                        + " capacity",
+                                leaseKey,
+                                workerIdentifier,
+                                i,
+                                TAKE_RETRIES);
                     }
                 }
             } finally {
@@ -224,12 +252,18 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
         }
 
         if (takenLeases.size() > 0) {
-            log.info("Worker {} successfully took {} leases: {}", workerIdentifier, takenLeases.size(),
+            log.info(
+                    "Worker {} successfully took {} leases: {}",
+                    workerIdentifier,
+                    takenLeases.size(),
                     stringJoin(takenLeases.keySet(), ", "));
         }
 
         if (untakenLeaseKeys.size() > 0) {
-            log.info("Worker {} failed to take {} leases: {}", workerIdentifier, untakenLeaseKeys.size(),
+            log.info(
+                    "Worker {} failed to take {} leases: {}",
+                    workerIdentifier,
+                    untakenLeaseKeys.size(),
                     stringJoin(untakenLeaseKeys, ", "));
         }
 
@@ -251,21 +285,25 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
      * @param updateAllLeasesEndTime How long it takes for update all leases to complete
      * @return set of leases to take.
      */
-    private Set<Lease> updateStaleLeasesWithLatestState(long updateAllLeasesEndTime,
-            Set<Lease> leasesToTake) {
+    private Set<Lease> updateStaleLeasesWithLatestState(long updateAllLeasesEndTime, Set<Lease> leasesToTake) {
         if (updateAllLeasesEndTime > leaseRenewalIntervalMillis * RENEWAL_SLACK_PERCENTAGE) {
-            leasesToTake = leasesToTake.stream().map(lease -> {
-                if (lease.isMarkedForLeaseSteal()) {
-                    try {
-                        log.debug("Updating stale lease {}.", lease.leaseKey());
-                        return leaseRefresher.getLease(lease.leaseKey());
-                    } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
-                        log.warn("Failed to fetch latest state of the lease {} that needs to be stolen, "
-                                + "defaulting to existing lease", lease.leaseKey(), e);
-                    }
-                }
-                return lease;
-            }).collect(Collectors.toSet());
+            leasesToTake = leasesToTake.stream()
+                    .map(lease -> {
+                        if (lease.isMarkedForLeaseSteal()) {
+                            try {
+                                log.debug("Updating stale lease {}.", lease.leaseKey());
+                                return leaseRefresher.getLease(lease.leaseKey());
+                            } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
+                                log.warn(
+                                        "Failed to fetch latest state of the lease {} that needs to be stolen, "
+                                                + "defaulting to existing lease",
+                                        lease.leaseKey(),
+                                        e);
+                            }
+                        }
+                        return lease;
+                    })
+                    .collect(Collectors.toSet());
         }
         return leasesToTake;
     }
@@ -295,14 +333,14 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
      *
      * @param timeProvider callable that supplies the current time
      *
-     * @return list of expired leases, possibly empty, never null.
+     * @return list of available leases, possibly empty, never null.
      *
      * @throws ProvisionedThroughputException if listLeases fails due to lack of provisioned throughput
      * @throws InvalidStateException if the lease table does not exist
      * @throws DependencyException if listLeases fails in an unexpected way
      */
     private void updateAllLeases(Callable<Long> timeProvider)
-        throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
         List<Lease> freshList = leaseRefresher.listLeases();
         try {
             lastScanTimeNanos = timeProvider.call();
@@ -335,14 +373,16 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
                 lease.lastCounterIncrementNanos(0L);
 
                 if (log.isDebugEnabled()) {
-                    log.debug("Treating new lease with key {} as never renewed because it is new and unowned.",
+                    log.debug(
+                            "Treating new lease with key {} as never renewed because it is new and unowned.",
                             leaseKey);
                 }
             } else {
                 // if this new lease is owned, treat it as renewed as of the scan
                 lease.lastCounterIncrementNanos(lastScanTimeNanos);
                 if (log.isDebugEnabled()) {
-                    log.debug("Treating new lease with key {} as recently renewed because it is new and owned.",
+                    log.debug(
+                            "Treating new lease with key {} as recently renewed because it is new and owned.",
                             leaseKey);
                 }
             }
@@ -356,43 +396,37 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
     }
 
     /**
-     * @return list of leases that were expired as of our last scan.
+     * @return list of leases that are available as of our last scan.
     */
     private List<Lease> getExpiredLeases() {
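Beyond formatting, the lease-taker hunks above change behavior: the misspelled `withVeryOldLeaseDurationNanosMultipler(long)` is deprecated in favor of `withVeryOldLeaseDurationNanosMultiplier(int)`, and a new `withEnablePriorityLeaseAssignment(boolean)` toggle (default `true` per the field initializer) gates the very-old-lease fast path. A hedged wiring sketch, assuming a `LeaseRefresher` built elsewhere; the worker id and lease duration values are illustrative only.

```java
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseTaker;
import software.amazon.kinesis.metrics.MetricsFactory;
import software.amazon.kinesis.metrics.NullMetricsFactory;

public class LeaseTakerConfigSketch {
    // leaseRefresher is assumed to be created elsewhere (e.g. a DynamoDBLeaseRefresher).
    static DynamoDBLeaseTaker configure(LeaseRefresher leaseRefresher) {
        MetricsFactory metricsFactory = new NullMetricsFactory();
        return new DynamoDBLeaseTaker(leaseRefresher, "worker-1", 10_000L, metricsFactory)
                // Correctly spelled setter introduced by this change; the old
                // withVeryOldLeaseDurationNanosMultipler(long) now delegates here.
                .withVeryOldLeaseDurationNanosMultiplier(3)
                // New toggle from this change; set false to skip the
                // priority (very-old-lease) assignment path entirely.
                .withEnablePriorityLeaseAssignment(true);
    }
}
```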
-        List<Lease> expiredLeases = new ArrayList<>();
-
-        for (Lease lease : allLeases.values()) {
-            if (lease.isExpired(leaseDurationNanos, lastScanTimeNanos)) {
-                expiredLeases.add(lease);
-            }
-        }
-
-        return expiredLeases;
+    private List<Lease> getAvailableLeases() {
+        return allLeases.values().stream()
+                .filter(lease -> lease.isAvailable(leaseDurationNanos, lastScanTimeNanos))
+                .collect(Collectors.toList());
     }
 
     /**
      * Compute the number of leases I should try to take based on the state of the system.
      *
-     * @param expiredLeases list of leases we determined to be expired
+     * @param availableLeases list of leases we determined to be available
+     * @param timeProvider callable which returns the current time in nanos
      * @return set of leases to take.
      */
-    private Set<Lease> computeLeasesToTake(List<Lease> expiredLeases) {
-        Map<String, Integer> leaseCounts = computeLeaseCounts(expiredLeases);
+    @VisibleForTesting
+    Set<Lease> computeLeasesToTake(List<Lease> availableLeases, Callable<Long> timeProvider)
+            throws DependencyException {
+        Map<String, Integer> leaseCounts = computeLeaseCounts(availableLeases);
         Set<Lease> leasesToTake = new HashSet<>();
         final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, TAKE_LEASES_DIMENSION);
         MetricsUtil.addWorkerIdentifier(scope, workerIdentifier);
 
-        final int numAvailableLeases = expiredLeases.size();
-        int numLeases = 0;
-        int numWorkers = 0;
+        final int numAvailableLeases = availableLeases.size();
+        final int numLeases = allLeases.size();
+        final int numWorkers = leaseCounts.size();
         int numLeasesToReachTarget = 0;
         int leaseSpillover = 0;
         int veryOldLeaseCount = 0;
 
         try {
-            numLeases = allLeases.size();
-            numWorkers = leaseCounts.size();
-
             if (numLeases == 0) {
                 // If there are no leases, I shouldn't try to take any.
                 return leasesToTake;
@@ -418,7 +452,11 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
                             "Worker {} target is {} leases and maxLeasesForWorker is {}. Resetting target to {},"
                                     + " lease spillover is {}. Note that some shards may not be processed if no other "
                                     + "workers are able to pick them up.",
-                            workerIdentifier, target, maxLeasesForWorker, maxLeasesForWorker, leaseSpillover);
+                            workerIdentifier,
+                            target,
+                            maxLeasesForWorker,
+                            maxLeasesForWorker,
+                            leaseSpillover);
                     target = maxLeasesForWorker;
                 }
             }
@@ -430,19 +468,28 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
             // If there are leases that have been expired for an extended period of
             // time, take them with priority, disregarding the target (computed
             // later) but obeying the maximum limit per worker.
-            final long nanoThreshold = System.nanoTime() - (veryOldLeaseDurationNanosMultiplier * leaseDurationNanos);
-            final List<Lease> veryOldLeases = allLeases.values().stream()
-                    .filter(lease -> nanoThreshold > lease.lastCounterIncrementNanos())
-                    .collect(Collectors.toList());
-
-            if (!veryOldLeases.isEmpty()) {
-                Collections.shuffle(veryOldLeases);
-                veryOldLeaseCount = Math.max(0, Math.min(maxLeasesForWorker - currentLeaseCount, veryOldLeases.size()));
-                HashSet<Lease> result = new HashSet<>(veryOldLeases.subList(0, veryOldLeaseCount));
-                if (veryOldLeaseCount > 0) {
-                    log.info("Taking leases that have been expired for a long time: {}", result);
+            if (enablePriorityLeaseAssignment) {
+                long currentNanoTime;
+                try {
+                    currentNanoTime = timeProvider.call();
+                } catch (Exception e) {
+                    throw new DependencyException("Exception caught from timeProvider", e);
+                }
+                final long nanoThreshold = currentNanoTime - (veryOldLeaseDurationNanosMultiplier * leaseDurationNanos);
+                final List<Lease> veryOldLeases = allLeases.values().stream()
+                        .filter(lease -> nanoThreshold > lease.lastCounterIncrementNanos())
+                        .collect(Collectors.toList());
+
+                if (!veryOldLeases.isEmpty()) {
+                    Collections.shuffle(veryOldLeases);
+                    veryOldLeaseCount =
+                            Math.max(0, Math.min(maxLeasesForWorker - currentLeaseCount, veryOldLeases.size()));
+                    HashSet<Lease> result = new HashSet<>(veryOldLeases.subList(0, veryOldLeaseCount));
+                    if (veryOldLeaseCount > 0) {
+                        log.info("Taking leases that have been expired for a long time: {}", result);
+                    }
+                    return result;
                 }
-                return result;
             }
 
             if (numLeasesToReachTarget <= 0) {
@@ -450,20 +497,23 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
                 return leasesToTake;
             }
 
-            // Shuffle expiredLeases so workers don't all try to contend for the same leases.
-            Collections.shuffle(expiredLeases);
+            // Shuffle availableLeases so workers don't all try to contend for the same leases.
+            Collections.shuffle(availableLeases);
 
-            if (expiredLeases.size() > 0) {
-                // If we have expired leases, get up to <needed> leases from expiredLeases
-                for (; numLeasesToReachTarget > 0 && expiredLeases.size() > 0; numLeasesToReachTarget--) {
-                    leasesToTake.add(expiredLeases.remove(0));
+            if (availableLeases.size() > 0) {
+                // If we have available leases, get up to <needed> leases from availableLeases
+                for (; numLeasesToReachTarget > 0 && availableLeases.size() > 0; numLeasesToReachTarget--) {
+                    leasesToTake.add(availableLeases.remove(0));
                 }
             } else {
-                // If there are no expired leases and we need a lease, consider stealing.
+                // If there are no available leases and we need a lease, consider stealing.
                 List<Lease> leasesToSteal = chooseLeasesToSteal(leaseCounts, numLeasesToReachTarget, target);
                 for (Lease leaseToSteal : leasesToSteal) {
-                    log.info("Worker {} needed {} leases but none were expired, so it will steal lease {} from {}",
-                            workerIdentifier, numLeasesToReachTarget, leaseToSteal.leaseKey(),
+                    log.info(
+                            "Worker {} needed {} leases but none were available, so it will steal lease {} from {}",
+                            workerIdentifier,
+                            numLeasesToReachTarget,
+                            leaseToSteal.leaseKey(),
                             leaseToSteal.leaseOwner());
                     leasesToTake.add(leaseToSteal);
                 }
@@ -473,14 +523,20 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
                 log.info(
                         "Worker {} saw {} total leases, {} available leases, {} "
                                 + "workers. Target is {} leases, I have {} leases, I will take {} leases",
-                        workerIdentifier, numLeases, numAvailableLeases, numWorkers, target, myCount,
+                        workerIdentifier,
+                        numLeases,
+                        numAvailableLeases,
+                        numWorkers,
+                        target,
+                        myCount,
                         leasesToTake.size());
             }
         } finally {
-            scope.addData("ExpiredLeases", expiredLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
+            scope.addData("ExpiredLeases", numAvailableLeases, StandardUnit.COUNT, MetricsLevel.SUMMARY);
             scope.addData("LeaseSpillover", leaseSpillover, StandardUnit.COUNT, MetricsLevel.SUMMARY);
             scope.addData("LeasesToTake", leasesToTake.size(), StandardUnit.COUNT, MetricsLevel.DETAILED);
-            scope.addData("NeededLeases", Math.max(numLeasesToReachTarget, 0), StandardUnit.COUNT, MetricsLevel.DETAILED);
+            scope.addData(
+                    "NeededLeases", Math.max(numLeasesToReachTarget, 0), StandardUnit.COUNT, MetricsLevel.DETAILED);
             scope.addData("NumWorkers", numWorkers, StandardUnit.COUNT, MetricsLevel.SUMMARY);
             scope.addData("TotalLeases", numLeases, StandardUnit.COUNT, MetricsLevel.DETAILED);
             scope.addData("VeryOldLeases", veryOldLeaseCount, StandardUnit.COUNT, MetricsLevel.SUMMARY);
@@ -528,19 +584,17 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
 
         if (numLeasesToSteal <= 0) {
             if (log.isDebugEnabled()) {
-                log.debug(String.format("Worker %s not stealing from most loaded worker %s. He has %d,"
-                        + " target is %d, and I need %d",
-                        workerIdentifier,
-                        mostLoadedWorker.getKey(),
-                        mostLoadedWorker.getValue(),
-                        target,
-                        needed));
+                log.debug(String.format(
+                        "Worker %s not stealing from most loaded worker %s. He has %d,"
+                                + " target is %d, and I need %d",
+                        workerIdentifier, mostLoadedWorker.getKey(), mostLoadedWorker.getValue(), target, needed));
             }
 
             return leasesToSteal;
         } else {
             if (log.isDebugEnabled()) {
-                log.debug("Worker {} will attempt to steal {} leases from most loaded worker {}. "
-                        + " He has {} leases, target is {}, I need {}, maxLeasesToStealAtOneTime is {}.",
+                log.debug(
+                        "Worker {} will attempt to steal {} leases from most loaded worker {}. "
+                                + " He has {} leases, target is {}, I need {}, maxLeasesToStealAtOneTime is {}.",
                         workerIdentifier,
                         numLeasesToSteal,
                         mostLoadedWorker.getKey(),
@@ -573,19 +627,19 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
      * Count leases by host. Always includes myself, but otherwise only includes hosts that are currently holding
      * leases.
      *
-     * @param expiredLeases list of leases that are currently expired
+     * @param availableLeases list of leases that are currently available
      * @return map of workerIdentifier to lease count
     */
     @VisibleForTesting
-    Map<String, Integer> computeLeaseCounts(List<Lease> expiredLeases) {
+    Map<String, Integer> computeLeaseCounts(List<Lease> availableLeases) {
         Map<String, Integer> leaseCounts = new HashMap<>();
         // The set will give much faster lookup than the original list, an
         // important optimization when the list is large
-        Set<Lease> expiredLeasesSet = new HashSet<>(expiredLeases);
+        Set<Lease> availableLeasesSet = new HashSet<>(availableLeases);
 
-        // Compute the number of leases per worker by looking through allLeases and ignoring leases that have expired.
+        // Compute the number of leases per worker by looking through allLeases and ignoring leases that are available.
         for (Lease lease : allLeases.values()) {
-            if (!expiredLeasesSet.contains(lease)) {
+            if (!availableLeasesSet.contains(lease)) {
                 String leaseOwner = lease.leaseOwner();
                 Integer oldCount = leaseCounts.get(leaseOwner);
                 if (oldCount == null) {
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java
index 78c9c6c4..66eb51e6 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java
@@ -15,6 +15,8 @@
 package software.amazon.kinesis.leases.dynamodb;
 
+import java.util.Map;
+
 import lombok.NoArgsConstructor;
 import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
 import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate;
@@ -22,8 +24,6 @@ import software.amazon.kinesis.leases.DynamoUtils;
 import software.amazon.kinesis.leases.Lease;
 import software.amazon.kinesis.leases.MultiStreamLease;
 
-import java.util.Map;
-
 import static software.amazon.kinesis.leases.MultiStreamLease.validateAndCast;
 
 @NoArgsConstructor
@@ -44,14 +44,13 @@ public class DynamoDBMultiStreamLeaseSerializer extends DynamoDBLeaseSerializer
 
     @Override
     public MultiStreamLease fromDynamoRecord(Map<String, AttributeValue> dynamoRecord) {
-        final MultiStreamLease multiStreamLease = (MultiStreamLease) super
-                .fromDynamoRecord(dynamoRecord, new MultiStreamLease());
+        final MultiStreamLease multiStreamLease =
+                (MultiStreamLease) super.fromDynamoRecord(dynamoRecord, new MultiStreamLease());
         multiStreamLease.streamIdentifier(DynamoUtils.safeGetString(dynamoRecord, STREAM_ID_KEY));
         multiStreamLease.shardId(DynamoUtils.safeGetString(dynamoRecord, SHARD_ID_KEY));
         return multiStreamLease;
     }
 
-
     @Override
     public Map<String, AttributeValueUpdate> getDynamoUpdateLeaseUpdate(Lease lease) {
         final MultiStreamLease multiStreamLease = validateAndCast(lease);
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java
index 23022778..631d0473 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java
@@ -35,5 +35,4 @@ public interface TableCreatorCallback {
      * Input object for table creator
      */
    void performAction(TableCreatorCallbackInput tableCreatorCallbackInput);
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java
index edb31fdc..aadf6f2d 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java
@@ -34,6 +34,7 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
 public class TableCreatorCallbackInput {
     @NonNull
     private final DynamoDbAsyncClient dynamoDbClient;
+
     @NonNull
     private final String tableName;
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java
index ba97ab08..ed331e61 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java
@@ -19,9 +19,15 @@ package software.amazon.kinesis.leases.exceptions;
  */
 public class CustomerApplicationException extends Exception {
 
-    public CustomerApplicationException(Throwable e) { super(e);}
+    public CustomerApplicationException(Throwable e) {
+        super(e);
+    }
 
-    public CustomerApplicationException(String message, Throwable e) { super(message, e);}
+    public CustomerApplicationException(String message, Throwable e) {
+        super(message, e);
+    }
 
-    public CustomerApplicationException(String message) { super(message);}
+    public CustomerApplicationException(String message) {
+        super(message);
+    }
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java
index 8895d2cc..2cee9d2c 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java
@@ -30,5 +30,4 @@ public class DependencyException extends LeasingException {
     public DependencyException(String message, Throwable e) {
         super(message, e);
     }
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java
index 416654ae..d43dd222 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java
@@ -33,5 +33,4 @@ public class InvalidStateException extends LeasingException {
     public InvalidStateException(String message) {
         super(message);
     }
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java
index 2d3d0c2f..9c47a149 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java
@@ -15,6 +15,11 @@
 package software.amazon.kinesis.leases.exceptions;
 
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
+
 import lombok.EqualsAndHashCode;
 import lombok.Value;
 import lombok.experimental.Accessors;
@@ -23,31 +28,29 @@ import software.amazon.kinesis.leases.Lease;
 import software.amazon.kinesis.leases.ShardDetector;
 import software.amazon.kinesis.leases.ShardInfo;
 
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-
 /**
  * Helper class for cleaning up leases.
  */
 @Accessors(fluent = true)
+@EqualsAndHashCode
 @Value
-@EqualsAndHashCode(exclude = {"queueEntryTime"})
 public class LeasePendingDeletion {
-    private final StreamIdentifier streamIdentifier;
-    private final Lease lease;
-    private final ShardInfo shardInfo;
-    private final ShardDetector shardDetector;
+
+    StreamIdentifier streamIdentifier;
+    Lease lease;
+    ShardInfo shardInfo;
+    ShardDetector shardDetector;
 
     /**
      * Discovers the child shards for this lease.
-     * @return
+     *
      * @throws InterruptedException
      * @throws ExecutionException
      * @throws TimeoutException
     */
     public Set<String> getChildShardsFromService() throws InterruptedException, ExecutionException, TimeoutException {
-        return shardDetector.getChildShards(shardInfo.shardId()).stream().map(c -> c.shardId()).collect(Collectors.toSet());
+        return shardDetector.getChildShards(shardInfo.shardId()).stream()
+                .map(c -> c.shardId())
+                .collect(Collectors.toSet());
     }
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java
index d2638882..58577525 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java
@@ -32,5 +32,4 @@ public class LeasingException extends Exception {
     }
 
     private static final long serialVersionUID = 1L;
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java
index da4c6ad7..cedb6de7 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java
@@ -28,5 +28,4 @@ public class ProvisionedThroughputException extends LeasingException {
     public ProvisionedThroughputException(String message, Throwable e) {
         super(message, e);
     }
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java
index f7ec12c5..d095b018 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java
@@ -12,14 +12,13 @@ import software.amazon.kinesis.metrics.MetricsScope;
 * Helper class to sync leases with shards of the Kinesis stream.
 * It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding).
 * It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it
- * and begun processing it's child shards.
+ * and begun processing its child shards.
 *
 * <pre>
 * NOTE: This class is deprecated and will be removed in a future release.
 * </pre>
 */
 @Deprecated
 public class ShardSyncer {
     private static final HierarchicalShardSyncer HIERARCHICAL_SHARD_SYNCER = new HierarchicalShardSyncer();
-    private static final boolean garbageCollectLeases = true;
 
     /**
      * <pre>
      * NOTE: This method is deprecated and will be removed in a future release.
      * </pre>
      *
@@ -35,11 +34,20 @@ public class ShardSyncer {
      * @throws KinesisClientLibIOException
     */
     @Deprecated
-    public static synchronized void checkAndCreateLeasesForNewShards(@NonNull final ShardDetector shardDetector,
-            final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition,
-            final boolean ignoreUnexpectedChildShards, final MetricsScope scope)
-            throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException, InterruptedException {
-        HIERARCHICAL_SHARD_SYNCER.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, initialPosition,
-                scope, ignoreUnexpectedChildShards, leaseRefresher.isLeaseTableEmpty());
+    public static synchronized void checkAndCreateLeasesForNewShards(
+            @NonNull final ShardDetector shardDetector,
+            final LeaseRefresher leaseRefresher,
+            final InitialPositionInStreamExtended initialPosition,
+            final boolean ignoreUnexpectedChildShards,
+            final MetricsScope scope)
+            throws DependencyException, InvalidStateException, ProvisionedThroughputException,
+                    KinesisClientLibIOException, InterruptedException {
+        HIERARCHICAL_SHARD_SYNCER.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                leaseRefresher,
+                initialPosition,
+                scope,
+                ignoreUnexpectedChildShards,
+                leaseRefresher.isLeaseTableEmpty());
     }
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java
index 5f1ee18c..697172cb 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java
@@ -40,6 +40,7 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
 public class BlockOnParentShardTask implements ConsumerTask {
     @NonNull
     private final ShardInfo shardInfo;
+
     private final LeaseRefresher leaseRefresher;
     // Sleep for this duration if the parent shards have not completed processing, or we encounter an exception.
     private final long parentShardPollIntervalMillis;
@@ -48,7 +49,7 @@ public class BlockOnParentShardTask implements ConsumerTask {
 
     /*
      * (non-Javadoc)
-     * 
+     *
      * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call()
     */
     @Override
@@ -94,12 +95,11 @@ public class BlockOnParentShardTask implements ConsumerTask {
 
     /*
      * (non-Javadoc)
-     * 
+     *
      * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#taskType()
     */
     @Override
     public TaskType taskType() {
         return taskType;
     }
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java
index 1a416b65..3aa03b11 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java
@@ -49,7 +49,7 @@ interface ConsumerState {
 
     /**
      * Provides the next state of the consumer if the task failed. This defaults to no state change.
-     * 
+     *
      * @return the state to change to upon a task failure
     */
     default ConsumerState failureTransition() {
@@ -97,12 +97,11 @@ interface ConsumerState {
 
     /**
      * Indicates whether a state requires an external event to re-awaken for processing.
-     * 
+     *
      * @return true if the state is some external event to restart processing, false if events can be immediately
      *         dispatched.
     */
     default boolean requiresAwake() {
         return false;
     }
-
 }
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java
index c4a87082..1ef197bd 100644
--- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java
+++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java
@@ -24,7 +24,7 @@ import software.amazon.kinesis.retrieval.ThrottlingReporter;
 * and state transitions is contained within the {@link ConsumerState} objects.
 *
 * <h2>State Diagram</h2>
- * 
+ *
 * <pre>
      *       +-------------------+
      *       | Waiting on Parent |                               +------------------+
    @@ -82,7 +82,7 @@ class ConsumerStates {
             SHUTDOWN_REQUESTED(new ShutdownNotificationState()),
             SHUTTING_DOWN(new ShuttingDownState()),
             SHUTDOWN_COMPLETE(new ShutdownCompleteState());
    -        //@formatter:on
    +        // @formatter:on
     
             @Getter
             @Accessors(fluent = true)
    @@ -93,24 +93,11 @@ class ConsumerStates {
             }
         }
     
    -
         /**
          * The initial state that any {@link ShardConsumer} should start in.
          */
         static final ConsumerState INITIAL_STATE = ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState();
     
    -    private static ConsumerState shutdownStateFor(ShutdownReason reason) {
    -        switch (reason) {
    -        case REQUESTED:
    -            return ShardConsumerState.SHUTDOWN_REQUESTED.consumerState();
    -        case SHARD_END:
    -        case LEASE_LOST:
    -            return ShardConsumerState.SHUTTING_DOWN.consumerState();
    -        default:
    -            throw new IllegalArgumentException("Unknown reason: " + reason);
    -        }
    -    }
    -
         /**
     * This is the initial state of a shard consumer. This causes the consumer to remain blocked until all the parent
          * shards have been completed.
    @@ -133,8 +120,10 @@ class ConsumerStates {
         static class BlockedOnParentState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument consumerArgument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            return new BlockOnParentShardTask(consumerArgument.shardInfo(),
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument consumerArgument, ShardConsumer consumer, ProcessRecordsInput input) {
    +            return new BlockOnParentShardTask(
    +                    consumerArgument.shardInfo(),
                         consumerArgument.leaseCoordinator().leaseRefresher(),
                         consumerArgument.parentShardPollIntervalMillis());
             }
    @@ -197,11 +186,14 @@ class ConsumerStates {
         static class InitializingState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            return new InitializeTask(argument.shardInfo(),
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +            return new InitializeTask(
    +                    argument.shardInfo(),
                         argument.shardRecordProcessor(),
                         argument.checkpoint(),
    -                    argument.recordProcessorCheckpointer(), argument.initialPositionInStream(),
    +                    argument.recordProcessorCheckpointer(),
    +                    argument.initialPositionInStream(),
                         argument.recordsPublisher(),
                         argument.taskBackoffTimeMillis(),
                         argument.metricsFactory());
    @@ -257,9 +249,12 @@ class ConsumerStates {
         static class ProcessingState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            ThrottlingReporter throttlingReporter = new ThrottlingReporter(5, argument.shardInfo().shardId());
    -            return new ProcessTask(argument.shardInfo(),
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +            ThrottlingReporter throttlingReporter =
    +                    new ThrottlingReporter(5, argument.shardInfo().shardId());
    +            return new ProcessTask(
    +                    argument.shardInfo(),
                         argument.shardRecordProcessor(),
                         argument.recordProcessorCheckpointer(),
                         argument.taskBackoffTimeMillis(),
    @@ -271,8 +266,7 @@ class ConsumerStates {
                         argument.idleTimeInMilliseconds(),
                         argument.aggregatorUtil(),
                         argument.metricsFactory(),
    -                    argument.schemaRegistryDecoder()
    -            );
    +                    argument.schemaRegistryDecoder());
             }
     
             @Override
    @@ -335,9 +329,11 @@ class ConsumerStates {
         static class ShutdownNotificationState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 // TODO: notify shutdownrequested
    -            return new ShutdownNotificationTask(argument.shardRecordProcessor(),
    +            return new ShutdownNotificationTask(
    +                    argument.shardRecordProcessor(),
                         argument.recordProcessorCheckpointer(),
                         consumer.shutdownNotification(),
                         argument.shardInfo());
    @@ -370,7 +366,6 @@ class ConsumerStates {
             public boolean isTerminal() {
                 return false;
             }
    -
         }
     
         /**
    @@ -407,7 +402,8 @@ class ConsumerStates {
         static class ShutdownNotificationCompletionState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 return null;
             }
     
    @@ -484,9 +480,11 @@ class ConsumerStates {
         static class ShuttingDownState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 // TODO: set shutdown reason
    -            return new ShutdownTask(argument.shardInfo(),
    +            return new ShutdownTask(
    +                    argument.shardInfo(),
                         argument.shardDetector(),
                         argument.shardRecordProcessor(),
                         argument.recordProcessorCheckpointer(),
    @@ -528,7 +526,6 @@ class ConsumerStates {
             public boolean isTerminal() {
                 return false;
             }
    -
         }
     
         /**
    @@ -569,10 +566,8 @@ class ConsumerStates {
         static class ShutdownCompleteState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            if (consumer.shutdownNotification() != null) {
    -                consumer.shutdownNotification().shutdownComplete();
    -            }
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 return null;
             }
     
    @@ -600,7 +595,5 @@ class ConsumerStates {
             public boolean isTerminal() {
                 return true;
             }
    -
         }
    -
     }
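The `ConsumerStates` hunks above are formatting-only, but the pattern they reflow is worth seeing in isolation: each state supplies the task to run plus the state to move to on success, with failure defaulting to "stay and retry". A miniature, library-free sketch of that pattern; the state and task names here are invented for illustration and are not KCL's real types.

```java
public class StateMachineSketch {
    interface State {
        Runnable createTask();

        State successTransition();

        // Default mirrors ConsumerState: a failed task retries in the same state.
        default State failureTransition() {
            return this;
        }
    }

    enum LifecycleState implements State {
        INITIALIZING {
            @Override
            public Runnable createTask() {
                return () -> System.out.println("initialize");
            }

            @Override
            public State successTransition() {
                return PROCESSING;
            }
        },
        PROCESSING {
            @Override
            public Runnable createTask() {
                return () -> System.out.println("process records");
            }

            @Override
            public State successTransition() {
                return PROCESSING; // processing loops on itself until shutdown
            }
        }
    }

    public static void main(String[] args) {
        State state = LifecycleState.INITIALIZING;
        for (int i = 0; i < 3; i++) {
            state.createTask().run();
            state = state.successTransition();
        }
    }
}
```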
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
    index 2e607661..ef4b4429 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
@@ -25,7 +25,7 @@ public interface ConsumerTask extends Callable<TaskResult> {
         /**
          * Perform task logic.
          * E.g. perform set up (e.g. fetch records) and invoke a callback (e.g. processRecords() API).
    -     * 
    +     *
          * @return TaskResult (captures any exceptions encountered during execution of the task)
          */
         TaskResult call();
@@ -34,5 +34,4 @@ public interface ConsumerTask extends Callable<TaskResult> {
          * @return TaskType
          */
         TaskType taskType();
    -
     }
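`ConsumerTask` is a `Callable<TaskResult>` whose `call()` is documented to capture exceptions in the returned result rather than throw them. A self-contained sketch of that shape, with a simplified stand-in for `TaskResult` since its full API is not part of this diff:

```java
import java.util.concurrent.Callable;

public class TaskSketch {
    // Simplified stand-in for TaskResult: a null exception means success.
    static final class Result {
        final Exception exception;

        Result(Exception exception) {
            this.exception = exception;
        }
    }

    static final class NoOpTask implements Callable<Result> {
        @Override
        public Result call() {
            try {
                // Task logic would go here (e.g. set up, fetch records, invoke a callback).
                return new Result(null);
            } catch (RuntimeException e) {
                // Failures are reported through the result, not propagated.
                return new Result(e);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Result result = new NoOpTask().call();
        System.out.println("failed: " + (result.exception != null));
    }
}
```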
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java
    index 7816c1e1..705e1247 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java
    @@ -44,19 +44,25 @@ public class InitializeTask implements ConsumerTask {
     
         @NonNull
         private final ShardInfo shardInfo;
    +
         @NonNull
         private final ShardRecordProcessor shardRecordProcessor;
    +
         @NonNull
         private final Checkpointer checkpoint;
    +
         @NonNull
         private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer;
    +
         @NonNull
         private final InitialPositionInStreamExtended initialPositionInStream;
    +
         @NonNull
         private final RecordsPublisher cache;
     
         // Back off for this interval if we encounter a problem (exception)
         private final long backoffTimeMillis;
    +
         @NonNull
         private final MetricsFactory metricsFactory;
     
    @@ -78,7 +84,10 @@ public class InitializeTask implements ConsumerTask {
                 final String leaseKey = ShardInfo.getLeaseKey(shardInfo);
                 Checkpoint initialCheckpointObject = checkpoint.getCheckpointObject(leaseKey);
                 ExtendedSequenceNumber initialCheckpoint = initialCheckpointObject.checkpoint();
    -            log.debug("[{}]: Checkpoint: {} -- Initial Position: {}", leaseKey, initialCheckpoint,
    +            log.debug(
    +                    "[{}]: Checkpoint: {} -- Initial Position: {}",
    +                    leaseKey,
    +                    initialCheckpoint,
                         initialPositionInStream);
     
                 cache.start(initialCheckpoint, initialPositionInStream);
    @@ -94,8 +103,8 @@ public class InitializeTask implements ConsumerTask {
                         .pendingCheckpointState(initialCheckpointObject.pendingCheckpointState())
                         .build();
     
    -            final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory,
    -                    INITIALIZE_TASK_OPERATION);
    +            final MetricsScope scope =
    +                    MetricsUtil.createMetricsWithOperation(metricsFactory, INITIALIZE_TASK_OPERATION);
     
                 final long startTime = System.currentTimeMillis();
                 try {
    @@ -137,5 +146,4 @@ public class InitializeTask implements ConsumerTask {
         public TaskType taskType() {
             return taskType;
         }
    -
     }
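One pattern the `InitializeTask` hunks keep intact is worth calling out: capture a start time, run the work, and record latency in a `finally` block so that failures are measured too. A library-free sketch of that pattern, assuming `emit` as a stand-in for `MetricsUtil.addLatency(...)`; the metric name is illustrative.

```java
public class LatencySketch {
    // Stand-in metrics sink; the real code hands latency to a MetricsScope.
    static void emit(String metric, long millis) {
        System.out.println(metric + "=" + millis + "ms");
    }

    static void runMeasured(String metric, Runnable work) {
        final long startTime = System.currentTimeMillis();
        try {
            work.run();
        } finally {
            // Emitted even when work throws, mirroring the try/finally above.
            emit(metric, System.currentTimeMillis() - startTime);
        }
    }

    public static void main(String[] args) {
        runMeasured("RecordProcessor.initialize.Time", () -> {
            // the application's initialize() callback would run here
        });
    }
}
```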
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java
    index 85770dbf..04d3d394 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java
    @@ -21,11 +21,8 @@ import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput;
      */
     public class NoOpTaskExecutionListener implements TaskExecutionListener {
         @Override
    -    public void beforeTaskExecution(TaskExecutionListenerInput input) {
    -    }
    +    public void beforeTaskExecution(TaskExecutionListenerInput input) {}
     
         @Override
    -    public void afterTaskExecution(TaskExecutionListenerInput input) {
    -    }
    +    public void afterTaskExecution(TaskExecutionListenerInput input) {}
     }
    -
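Since `NoOpTaskExecutionListener` now uses empty-body formatting, a quick reminder of what a non-trivial listener looks like. This sketch assumes only the two interface methods shown above; it prints the input object itself because `TaskExecutionListenerInput`'s accessors are not part of this diff.

```java
import software.amazon.kinesis.lifecycle.TaskExecutionListener;
import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput;

// A minimal alternative to NoOpTaskExecutionListener that traces task dispatch.
public class LoggingTaskExecutionListener implements TaskExecutionListener {
    @Override
    public void beforeTaskExecution(TaskExecutionListenerInput input) {
        System.out.println("before task: " + input);
    }

    @Override
    public void afterTaskExecution(TaskExecutionListenerInput input) {
        System.out.println("after task: " + input);
    }
}
```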
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java
    index f3599c71..bd69930b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java
    @@ -17,9 +17,9 @@ package software.amazon.kinesis.lifecycle;
     
     import org.reactivestreams.Subscriber;
     import org.reactivestreams.Subscription;
    +import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.RecordsRetrieved;
    -import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     
     /**
      * Subscriber that notifies its publisher on receipt of the onNext event.
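`NotifyingSubscriber` (only its import order changes here) is described as a subscriber that notifies its publisher on receipt of each `onNext`. A generic Reactive Streams sketch of that idea; the `Ack` hook is a hypothetical stand-in for the `RecordsDeliveryAck`/`RecordsPublisher` wiring in the real interface.

```java
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

public class NotifyingSubscriberSketch<T> implements Subscriber<T> {
    interface Ack<T> {
        void acknowledge(T item);
    }

    private final Ack<T> ack;
    private Subscription subscription;

    public NotifyingSubscriberSketch(Ack<T> ack) {
        this.ack = ack;
    }

    @Override
    public void onSubscribe(Subscription s) {
        this.subscription = s;
        s.request(1); // one-at-a-time flow control
    }

    @Override
    public void onNext(T item) {
        ack.acknowledge(item);   // notify the publisher that delivery landed
        subscription.request(1); // then ask for the next item
    }

    @Override
    public void onError(Throwable t) {
        t.printStackTrace();
    }

    @Override
    public void onComplete() {}
}
```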
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java
    index e4b38815..39a6bff6 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java
    @@ -28,8 +28,8 @@ import software.amazon.kinesis.leases.ShardDetector;
     import software.amazon.kinesis.leases.ShardInfo;
     import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
     import software.amazon.kinesis.metrics.MetricsFactory;
    -import software.amazon.kinesis.metrics.MetricsScope;
     import software.amazon.kinesis.metrics.MetricsLevel;
    +import software.amazon.kinesis.metrics.MetricsScope;
     import software.amazon.kinesis.metrics.MetricsUtil;
     import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.retrieval.AggregatorUtil;
    @@ -66,19 +66,20 @@ public class ProcessTask implements ConsumerTask {
         private final String shardInfoId;
         private final SchemaRegistryDecoder schemaRegistryDecoder;
     
    -    public ProcessTask(@NonNull ShardInfo shardInfo,
    -                       @NonNull ShardRecordProcessor shardRecordProcessor,
    -                       @NonNull ShardRecordProcessorCheckpointer recordProcessorCheckpointer,
    -                       long backoffTimeMillis,
    -                       boolean skipShardSyncAtWorkerInitializationIfLeasesExist,
    -                       ShardDetector shardDetector,
    -                       @NonNull ThrottlingReporter throttlingReporter,
    -                       ProcessRecordsInput processRecordsInput,
    -                       boolean shouldCallProcessRecordsEvenForEmptyRecordList,
    -                       long idleTimeInMilliseconds,
    -                       @NonNull AggregatorUtil aggregatorUtil,
    -                       @NonNull MetricsFactory metricsFactory,
    -                       SchemaRegistryDecoder schemaRegistryDecoder) {
    +    public ProcessTask(
    +            @NonNull ShardInfo shardInfo,
    +            @NonNull ShardRecordProcessor shardRecordProcessor,
    +            @NonNull ShardRecordProcessorCheckpointer recordProcessorCheckpointer,
    +            long backoffTimeMillis,
    +            boolean skipShardSyncAtWorkerInitializationIfLeasesExist,
    +            ShardDetector shardDetector,
    +            @NonNull ThrottlingReporter throttlingReporter,
    +            ProcessRecordsInput processRecordsInput,
    +            boolean shouldCallProcessRecordsEvenForEmptyRecordList,
    +            long idleTimeInMilliseconds,
    +            @NonNull AggregatorUtil aggregatorUtil,
    +            @NonNull MetricsFactory metricsFactory,
    +            SchemaRegistryDecoder schemaRegistryDecoder) {
             this.shardInfo = shardInfo;
             this.shardInfoId = ShardInfo.getLeaseKey(shardInfo);
             this.shardRecordProcessor = shardRecordProcessor;
    @@ -113,15 +114,18 @@ public class ProcessTask implements ConsumerTask {
          */
         @Override
         public TaskResult call() {
    -        /**
    +        /*
              * NOTE: the difference between appScope and shardScope is, appScope doesn't have shardId as a dimension,
          * therefore all data added to appScope, although from different shard consumers, will be sent to the same metric,
          * which is the app-level MillisBehindLatest metric.
              */
    -        final MetricsScope appScope = MetricsUtil.createMetricsWithOperation(metricsFactory, APPLICATION_TRACKER_OPERATION);
    +        final MetricsScope appScope =
    +                MetricsUtil.createMetricsWithOperation(metricsFactory, APPLICATION_TRACKER_OPERATION);
             final MetricsScope shardScope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION);
    -        shardInfo.streamIdentifierSerOpt()
    -                .ifPresent(streamId -> MetricsUtil.addStreamId(shardScope, StreamIdentifier.multiStreamInstance(streamId)));
    +        shardInfo
    +                .streamIdentifierSerOpt()
    +                .ifPresent(streamId ->
    +                        MetricsUtil.addStreamId(shardScope, StreamIdentifier.multiStreamInstance(streamId)));
             MetricsUtil.addShardId(shardScope, shardInfo.shardId());
             long startTimeMillis = System.currentTimeMillis();
             boolean success = false;
    @@ -132,13 +136,20 @@ public class ProcessTask implements ConsumerTask {
     
                 try {
                     if (processRecordsInput.millisBehindLatest() != null) {
    -                    shardScope.addData(MILLIS_BEHIND_LATEST_METRIC, processRecordsInput.millisBehindLatest(),
    -                            StandardUnit.MILLISECONDS, MetricsLevel.SUMMARY);
    -                    appScope.addData(MILLIS_BEHIND_LATEST_METRIC, processRecordsInput.millisBehindLatest(),
    -                            StandardUnit.MILLISECONDS, MetricsLevel.SUMMARY);
    +                    shardScope.addData(
    +                            MILLIS_BEHIND_LATEST_METRIC,
    +                            processRecordsInput.millisBehindLatest(),
    +                            StandardUnit.MILLISECONDS,
    +                            MetricsLevel.SUMMARY);
    +                    appScope.addData(
    +                            MILLIS_BEHIND_LATEST_METRIC,
    +                            processRecordsInput.millisBehindLatest(),
    +                            StandardUnit.MILLISECONDS,
    +                            MetricsLevel.SUMMARY);
                     }
     
    -                if (processRecordsInput.isAtShardEnd() && processRecordsInput.records().isEmpty()) {
    +                if (processRecordsInput.isAtShardEnd()
    +                        && processRecordsInput.records().isEmpty()) {
                         log.info("Reached end of shard {} and have no records to process", shardInfoId);
                         return new TaskResult(null, true);
                     }
    @@ -151,11 +162,14 @@ public class ProcessTask implements ConsumerTask {
                     }
     
                     if (!records.isEmpty()) {
    -                    shardScope.addData(RECORDS_PROCESSED_METRIC, records.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
    +                    shardScope.addData(
    +                            RECORDS_PROCESSED_METRIC, records.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
                     }
     
                     recordProcessorCheckpointer.largestPermittedCheckpointValue(filterAndGetMaxExtendedSequenceNumber(
    -                        shardScope, records, recordProcessorCheckpointer.lastCheckpointValue(),
    +                        shardScope,
    +                        records,
    +                        recordProcessorCheckpointer.lastCheckpointValue(),
                             recordProcessorCheckpointer.largestPermittedCheckpointValue()));
     
                     if (shouldCallProcessRecords(records)) {
    @@ -169,7 +183,10 @@ public class ProcessTask implements ConsumerTask {
                 }
     
                 if (processRecordsInput.isAtShardEnd()) {
    -                log.info("Reached end of shard {}, and processed {} records", shardInfoId, processRecordsInput.records().size());
    +                log.info(
    +                        "Reached end of shard {}, and processed {} records",
    +                        shardInfoId,
    +                        processRecordsInput.records().size());
                     return new TaskResult(null, true);
                 }
                 return new TaskResult(exception);
    @@ -180,13 +197,14 @@ public class ProcessTask implements ConsumerTask {
             }
         }
     
    -
    -
     private List<KinesisClientRecord> deaggregateAnyKplRecords(List<KinesisClientRecord> records) {
             if (shard == null) {
                 return aggregatorUtil.deaggregate(records);
             } else {
    -            return aggregatorUtil.deaggregate(records, shard.hashKeyRange().startingHashKey(), shard.hashKeyRange().endingHashKey());
    +            return aggregatorUtil.deaggregate(
    +                    records,
    +                    shard.hashKeyRange().startingHashKey(),
    +                    shard.hashKeyRange().endingHashKey());
             }
         }
     
    @@ -211,22 +229,30 @@ public class ProcessTask implements ConsumerTask {
          *            the records to be dispatched. It's possible the records have been adjusted by KPL deaggregation.
          */
     private void callProcessRecords(ProcessRecordsInput input, List<KinesisClientRecord> records) {
    -        log.debug("Calling application processRecords() with {} records from {}", records.size(),
    -                shardInfoId);
    +        log.debug("Calling application processRecords() with {} records from {}", records.size(), shardInfoId);
     
    -        final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records).cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime())
    -                .isAtShardEnd(input.isAtShardEnd()).checkpointer(recordProcessorCheckpointer).millisBehindLatest(input.millisBehindLatest()).build();
    +        final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder()
    +                .records(records)
    +                .cacheExitTime(input.cacheExitTime())
    +                .cacheEntryTime(input.cacheEntryTime())
    +                .isAtShardEnd(input.isAtShardEnd())
    +                .checkpointer(recordProcessorCheckpointer)
    +                .millisBehindLatest(input.millisBehindLatest())
    +                .build();
     
             final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION);
    -        shardInfo.streamIdentifierSerOpt()
    +        shardInfo
    +                .streamIdentifierSerOpt()
                     .ifPresent(streamId -> MetricsUtil.addStreamId(scope, StreamIdentifier.multiStreamInstance(streamId)));
             MetricsUtil.addShardId(scope, shardInfo.shardId());
             final long startTime = System.currentTimeMillis();
             try {
                 shardRecordProcessor.processRecords(processRecordsInput);
             } catch (Exception e) {
    -            log.error("ShardId {}: Application processRecords() threw an exception when processing shard ",
    -                    shardInfoId, e);
    +            log.error(
    +                    "ShardId {}: Application processRecords() threw an exception when processing shard ",
    +                    shardInfoId,
    +                    e);
                 log.error("ShardId {}: Skipping over the following data records: {}", shardInfoId, records);
             } finally {
                 MetricsUtil.addLatency(scope, RECORD_PROCESSOR_PROCESS_RECORDS_METRIC, startTime, MetricsLevel.SUMMARY);
    @@ -245,28 +271,6 @@ public class ProcessTask implements ConsumerTask {
             return (!records.isEmpty()) || shouldCallProcessRecordsEvenForEmptyRecordList;
         }
     
    -    /**
    -     * Emits metrics, and sleeps if there are no records available
    -     *
    -     * @param startTimeMillis
    -     *            the time when the task started
    -     */
    -    private void handleNoRecords(long startTimeMillis) {
    -        log.debug("Kinesis didn't return any records for shard {}", shardInfoId);
    -
    -        long sleepTimeMillis = idleTimeInMilliseconds - (System.currentTimeMillis() - startTimeMillis);
    -        if (sleepTimeMillis > 0) {
    -            sleepTimeMillis = Math.max(sleepTimeMillis, idleTimeInMilliseconds);
    -            try {
    -                log.debug("Sleeping for {} ms since there were no new records in shard {}", sleepTimeMillis,
    -                        shardInfoId);
    -                Thread.sleep(sleepTimeMillis);
    -            } catch (InterruptedException e) {
    -                log.debug("ShardId {}: Sleep was interrupted", shardInfoId);
    -            }
    -        }
    -    }
    -
         @Override
         public TaskType taskType() {
             return taskType;
    @@ -286,21 +290,25 @@ public class ProcessTask implements ConsumerTask {
          *            previous largest permitted checkpoint value
          * @return the largest extended sequence number among the retained records
          */
-    private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(final MetricsScope scope,
-                                                                         final List<KinesisClientRecord> records,
-                                                                         final ExtendedSequenceNumber lastCheckpointValue,
-                                                                         final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) {
+    private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(
+            final MetricsScope scope,
+            final List<KinesisClientRecord> records,
+            final ExtendedSequenceNumber lastCheckpointValue,
+            final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) {
             ExtendedSequenceNumber largestExtendedSequenceNumber = lastLargestPermittedCheckpointValue;
         ListIterator<KinesisClientRecord> recordIterator = records.listIterator();
             while (recordIterator.hasNext()) {
                 KinesisClientRecord record = recordIterator.next();
    -            ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(record.sequenceNumber(),
    -                    record.subSequenceNumber());
    +            ExtendedSequenceNumber extendedSequenceNumber =
    +                    new ExtendedSequenceNumber(record.sequenceNumber(), record.subSequenceNumber());
     
                 if (extendedSequenceNumber.compareTo(lastCheckpointValue) <= 0) {
                     recordIterator.remove();
    -                log.debug("{} : removing record with ESN {} because the ESN is <= checkpoint ({})", shardInfoId,
    -                        extendedSequenceNumber, lastCheckpointValue);
    +                log.debug(
    +                        "{} : removing record with ESN {} because the ESN is <= checkpoint ({})",
    +                        shardInfoId,
    +                        extendedSequenceNumber,
    +                        lastCheckpointValue);
                     continue;
                 }
     
    @@ -309,10 +317,8 @@ public class ProcessTask implements ConsumerTask {
                     largestExtendedSequenceNumber = extendedSequenceNumber;
                 }
     
    -            scope.addData(DATA_BYTES_PROCESSED_METRIC, record.data().limit(), StandardUnit.BYTES,
    -                    MetricsLevel.SUMMARY);
    +            scope.addData(DATA_BYTES_PROCESSED_METRIC, record.data().limit(), StandardUnit.BYTES, MetricsLevel.SUMMARY);
             }
             return largestExtendedSequenceNumber;
         }
    -
     }
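filterAndGetMaxExtendedSequenceNumber above relies on (sequenceNumber, subSequenceNumber) pairs being ordered, so anything at or below the last checkpoint can be dropped before dispatch. A minimal sketch of that comparison and filter, assuming the pair orders lexicographically (the real ExtendedSequenceNumber also handles sentinel checkpoints such as SHARD_END, omitted here):

    import java.math.BigInteger;
    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the checkpoint filter: records whose (sequenceNumber,
    // subSequenceNumber) pair is <= the last checkpoint were already processed
    // and are removed before the record processor sees them. Esn is a
    // simplified stand-in for ExtendedSequenceNumber.
    public class EsnFilterSketch {
        static class Esn implements Comparable<Esn> {
            final BigInteger sequenceNumber;
            final long subSequenceNumber;
            Esn(String seq, long sub) { this.sequenceNumber = new BigInteger(seq); this.subSequenceNumber = sub; }
            @Override public int compareTo(Esn o) {
                int bySeq = sequenceNumber.compareTo(o.sequenceNumber);
                return bySeq != 0 ? bySeq : Long.compare(subSequenceNumber, o.subSequenceNumber);
            }
        }

        public static void main(String[] args) {
            Esn checkpoint = new Esn("100", 2);
            List<Esn> records = new ArrayList<>(List.of(
                    new Esn("100", 1),   // <= checkpoint: dropped
                    new Esn("100", 2),   // == checkpoint: dropped
                    new Esn("100", 3),   // retained (same record, later sub-record)
                    new Esn("101", 0))); // retained
            records.removeIf(r -> r.compareTo(checkpoint) <= 0);
            System.out.println(records.size()); // 2
        }
    }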
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java
    index 8902a262..be9f717b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java
    @@ -23,15 +23,13 @@ import java.util.concurrent.ExecutorService;
     import java.util.concurrent.RejectedExecutionException;
     import java.util.function.Function;
     
    -import org.reactivestreams.Subscription;
    -
     import com.google.common.annotations.VisibleForTesting;
    -
     import lombok.AccessLevel;
     import lombok.Getter;
     import lombok.NonNull;
     import lombok.experimental.Accessors;
     import lombok.extern.slf4j.Slf4j;
    +import org.reactivestreams.Subscription;
     import software.amazon.kinesis.annotations.KinesisClientInternalApi;
     import software.amazon.kinesis.exceptions.internal.BlockedOnParentShardException;
     import software.amazon.kinesis.leases.ShardInfo;
    @@ -57,9 +55,16 @@ public class ShardConsumer {
         private final ExecutorService executorService;
         private final ShardInfo shardInfo;
         private final ShardConsumerArgument shardConsumerArgument;
    +
         @NonNull
     private final Optional<Long> logWarningForTaskAfterMillis;
    +
    +    /**
    +     * @deprecated unused; to be removed in a "major" version bump
    +     */
    +    @Deprecated
     private final Function<ConsumerTask, ConsumerTask> taskMetricsDecorator;
    +
         private final int bufferSize;
         private final TaskExecutionListener taskExecutionListener;
         private final String streamIdentifier;
    @@ -80,8 +85,10 @@ public class ShardConsumer {
         private ConsumerState currentState;
     
         private final Object shutdownLock = new Object();
    +
         @Getter(AccessLevel.PUBLIC)
         private volatile ShutdownReason shutdownReason;
    +
         private volatile ShutdownNotification shutdownNotification;
     
         private final ShardConsumerSubscriber subscriber;
    @@ -89,41 +96,85 @@ public class ShardConsumer {
         private ProcessRecordsInput shardEndProcessRecordsInput;
     
         @Deprecated
-    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
-            Optional<Long> logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
+    public ShardConsumer(
+            RecordsPublisher recordsPublisher,
+            ExecutorService executorService,
+            ShardInfo shardInfo,
+            Optional<Long> logWarningForTaskAfterMillis,
+            ShardConsumerArgument shardConsumerArgument,
                 TaskExecutionListener taskExecutionListener) {
    -        this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument,
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                shardInfo,
    +                logWarningForTaskAfterMillis,
    +                shardConsumerArgument,
                     ConsumerStates.INITIAL_STATE,
    -                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()), 8, taskExecutionListener,
    +                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()),
    +                8,
    +                taskExecutionListener,
                     LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
         }
     
-    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
-            Optional<Long> logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
-            TaskExecutionListener taskExecutionListener, int readTimeoutsToIgnoreBeforeWarning) {
-        this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument,
+    public ShardConsumer(
+            RecordsPublisher recordsPublisher,
+            ExecutorService executorService,
+            ShardInfo shardInfo,
+            Optional<Long> logWarningForTaskAfterMillis,
+            ShardConsumerArgument shardConsumerArgument,
+            TaskExecutionListener taskExecutionListener,
+            int readTimeoutsToIgnoreBeforeWarning) {
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                shardInfo,
    +                logWarningForTaskAfterMillis,
    +                shardConsumerArgument,
                     ConsumerStates.INITIAL_STATE,
    -                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()), 8, taskExecutionListener,
    +                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()),
    +                8,
    +                taskExecutionListener,
                     readTimeoutsToIgnoreBeforeWarning);
         }
     
         @Deprecated
-    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
-            Optional<Long> logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
-            ConsumerState initialState, Function<ConsumerTask, ConsumerTask> taskMetricsDecorator, int bufferSize,
+    public ShardConsumer(
+            RecordsPublisher recordsPublisher,
+            ExecutorService executorService,
+            ShardInfo shardInfo,
+            Optional<Long> logWarningForTaskAfterMillis,
+            ShardConsumerArgument shardConsumerArgument,
+            ConsumerState initialState,
+            Function<ConsumerTask, ConsumerTask> taskMetricsDecorator,
+            int bufferSize,
                 TaskExecutionListener taskExecutionListener) {
    -        this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument,
    -                initialState, taskMetricsDecorator, bufferSize, taskExecutionListener,
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                shardInfo,
    +                logWarningForTaskAfterMillis,
    +                shardConsumerArgument,
    +                initialState,
    +                taskMetricsDecorator,
    +                bufferSize,
    +                taskExecutionListener,
                     LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
         }
     
         //
         // TODO: Make bufferSize configurable
         //
-    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
-            Optional<Long> logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
-            ConsumerState initialState, Function<ConsumerTask, ConsumerTask> taskMetricsDecorator, int bufferSize,
-            TaskExecutionListener taskExecutionListener, int readTimeoutsToIgnoreBeforeWarning) {
+    public ShardConsumer(
+            RecordsPublisher recordsPublisher,
+            ExecutorService executorService,
+            ShardInfo shardInfo,
+            Optional<Long> logWarningForTaskAfterMillis,
+            ShardConsumerArgument shardConsumerArgument,
+            ConsumerState initialState,
+            Function<ConsumerTask, ConsumerTask> taskMetricsDecorator,
+            int bufferSize,
+            TaskExecutionListener taskExecutionListener,
+            int readTimeoutsToIgnoreBeforeWarning) {
             this.recordsPublisher = recordsPublisher;
             this.executorService = executorService;
             this.shardInfo = shardInfo;
    @@ -133,8 +184,8 @@ public class ShardConsumer {
             this.taskExecutionListener = taskExecutionListener;
             this.currentState = initialState;
             this.taskMetricsDecorator = taskMetricsDecorator;
    -        subscriber = new ShardConsumerSubscriber(recordsPublisher, executorService, bufferSize, this,
    -                readTimeoutsToIgnoreBeforeWarning);
    +        subscriber = new ShardConsumerSubscriber(
    +                recordsPublisher, executorService, bufferSize, this, readTimeoutsToIgnoreBeforeWarning);
             this.bufferSize = bufferSize;
     
             if (this.shardInfo.isCompleted()) {
    @@ -173,13 +224,16 @@ public class ShardConsumer {
                         // Task rejection during the subscribe() call will not be propagated back, as it is not
                         // executed in the context of the Scheduler thread. Hence we should not assume the
                         // subscription will always be successful.
+                        // But if the subscription was not successful, it will recover during healthCheck,
+                        // which will restart the subscription.
+                        // From the ShardConsumer's point of view, initialization is complete after the
+                        // subscribe() call below.
                             subscribe();
                             needsInitialization = false;
                         }
                     }
                     stateChangeFuture = initializeComplete();
                 }
    -
             } catch (InterruptedException e) {
                 //
                 // Ignored should be handled by scheduler
    @@ -199,7 +253,6 @@ public class ShardConsumer {
                     throw (Error) t;
                 }
             }
    -
         }
     
         @VisibleForTesting
    @@ -213,8 +266,10 @@ public class ShardConsumer {
             }
             Throwable dispatchFailure = subscriber.getAndResetDispatchFailure();
             if (dispatchFailure != null) {
    -            log.warn("{} : Exception occurred while dispatching incoming data.  The incoming data has been skipped",
    -                    streamIdentifier, dispatchFailure);
    +            log.warn(
    +                    "{} : Exception occurred while dispatching incoming data.  The incoming data has been skipped",
    +                    streamIdentifier,
    +                    dispatchFailure);
                 return dispatchFailure;
             }
     
    @@ -230,8 +285,9 @@ public class ShardConsumer {
     
         String longRunningTaskMessage(Duration taken) {
             if (taken != null) {
    -            return String.format("Previous %s task still pending for shard %s since %s ago. ", currentTask.taskType(),
    -                    shardInfo.shardId(), taken);
    +            return String.format(
    +                    "Previous %s task still pending for shard %s since %s ago. ",
    +                    currentTask.taskType(), shardInfo.shardId(), taken);
             }
             return null;
         }
    @@ -272,46 +328,68 @@ public class ShardConsumer {
     
         @VisibleForTesting
     synchronized CompletableFuture<Boolean> initializeComplete() {
    +        if (!needsInitialization) {
+            // Initialization is already complete, so this must be a no-op.
+            // The ShardConsumer must be in the PROCESSING state, and any further activity
+            // is driven by the publisher pushing data to the subscriber, which invokes
+            // handleInput and in turn triggers a ProcessTask.
+            // The Scheduler is only meant to run health checks, to ensure the consumer is
+            // not stuck for any reason, and to handle shutdown.
    +            return CompletableFuture.completedFuture(true);
    +        }
    +
             if (taskOutcome != null) {
                 updateState(taskOutcome);
             }
             if (currentState.state() == ConsumerStates.ShardConsumerState.PROCESSING) {
                 return CompletableFuture.completedFuture(true);
             }
    -        return CompletableFuture.supplyAsync(() -> {
    -            if (isShutdownRequested()) {
    -                throw new IllegalStateException("Shutdown requested while initializing");
    -            }
    -            executeTask(null);
    -            if (isShutdownRequested()) {
    -                throw new IllegalStateException("Shutdown requested while initializing");
    -            }
    -            return false;
    -        }, executorService);
    +        return CompletableFuture.supplyAsync(
    +                () -> {
    +                    if (isShutdownRequested()) {
    +                        throw new IllegalStateException("Shutdown requested while initializing");
    +                    }
    +                    executeTask(null);
    +                    if (isShutdownRequested()) {
    +                        throw new IllegalStateException("Shutdown requested while initializing");
    +                    }
    +                    return false;
    +                },
    +                executorService);
         }
     
         @VisibleForTesting
     CompletableFuture<Boolean> shutdownComplete() {
    -        return CompletableFuture.supplyAsync(() -> {
    -            synchronized (this) {
    -                if (taskOutcome != null) {
    -                    updateState(taskOutcome);
    -                } else {
    -                    //
    -                    // ShardConsumer has been asked to shutdown before the first task even had a chance to run.
    -                    // In this case generate a successful task outcome, and allow the shutdown to continue.
    -                    // This should only happen if the lease was lost before the initial state had a chance to run.
    -                    //
    -                    updateState(TaskOutcome.SUCCESSFUL);
    -                }
    -                if (isShutdown()) {
    -                    return true;
    -                }
    +        return CompletableFuture.supplyAsync(
    +                () -> {
    +                    synchronized (this) {
    +                        if (taskOutcome != null) {
    +                            updateState(taskOutcome);
    +                        } else {
    +                            //
+                            // ShardConsumer has been asked to shut down before the first task even had a chance
+                            // to run. In this case, generate a successful task outcome and allow the shutdown to
+                            // continue. This should only happen if the lease was lost before the initial state
+                            // had a chance to run.
    +                            //
    +                            updateState(TaskOutcome.SUCCESSFUL);
    +                        }
    +                        if (isShutdown()) {
    +                            return true;
    +                        }
     
    -                executeTask(shardEndProcessRecordsInput);
    -                return false;
    -            }
    -        }, executorService);
    +                        executeTask(shardEndProcessRecordsInput);
    +
    +                        // call shutdownNotification.shutdownComplete() if shutting down as part of gracefulShutdown
    +                        if (currentState.state() == ConsumerStates.ShardConsumerState.SHUTTING_DOWN
    +                                && taskOutcome == TaskOutcome.SUCCESSFUL
    +                                && shutdownNotification != null) {
    +                            shutdownNotification.shutdownComplete();
    +                        }
    +                        return false;
    +                    }
    +                },
    +                executorService);
         }
     
         private synchronized void processData(ProcessRecordsInput input) {
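Both initializeComplete() and shutdownComplete() hand their work to CompletableFuture.supplyAsync(supplier, executorService), so state transitions run on the consumer's own executor (not the common pool) and can abort by throwing once shutdown is requested. A standalone sketch of that pattern; the flag and step body here are hypothetical placeholders:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Sketch of the future-based state-step pattern: run one step on a supplied
    // executor, abort by throwing if shutdown was requested, and return false
    // until the target state is reached so the caller schedules another step.
    public class StateStepSketch {
        private volatile boolean shutdownRequested = false;

        CompletableFuture<Boolean> stepComplete(ExecutorService executor) {
            return CompletableFuture.supplyAsync(
                    () -> {
                        if (shutdownRequested) {
                            throw new IllegalStateException("Shutdown requested while initializing");
                        }
                        // ... execute one state-machine task here ...
                        return false; // not yet in the terminal state
                    },
                    executor);
        }

        public static void main(String[] args) {
            ExecutorService executor = Executors.newSingleThreadExecutor();
            System.out.println(new StateStepSketch().stepComplete(executor).join()); // false
            executor.shutdown();
        }
    }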
    @@ -320,7 +398,9 @@ public class ShardConsumer {
     
         private synchronized void executeTask(ProcessRecordsInput input) {
             TaskExecutionListenerInput taskExecutionListenerInput = TaskExecutionListenerInput.builder()
    -                .shardInfo(shardInfo).taskType(currentState.taskType()).build();
    +                .shardInfo(shardInfo)
    +                .taskType(currentState.taskType())
    +                .build();
             taskExecutionListener.beforeTaskExecution(taskExecutionListenerInput);
             ConsumerTask task = currentState.createTask(shardConsumerArgument, ShardConsumer.this, input);
             if (task != null) {
    @@ -334,7 +414,9 @@ public class ShardConsumer {
                     taskIsRunning = false;
                 }
                 taskOutcome = resultToOutcome(result);
    -            taskExecutionListenerInput = taskExecutionListenerInput.toBuilder().taskOutcome(taskOutcome).build();
    +            taskExecutionListenerInput = taskExecutionListenerInput.toBuilder()
    +                    .taskOutcome(taskOutcome)
    +                    .build();
             }
             taskExecutionListener.afterTaskExecution(taskExecutionListenerInput);
         }
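executeTask() brackets every task between beforeTaskExecution and afterTaskExecution, rebuilding the listener input with the outcome attached. A simplified sketch of that bracketing; the listener and task types below are stand-ins, not the KCL interfaces:

    // Sketch of the before/after bracketing: the listener sees the task before
    // it runs and again with the outcome attached afterwards, even on failure.
    public class ListenerBracketSketch {
        interface Listener {
            void beforeTaskExecution(String taskType);
            void afterTaskExecution(String taskType, String outcome);
        }

        static void execute(Listener listener, String taskType, Runnable task) {
            listener.beforeTaskExecution(taskType);
            String outcome = "SUCCESSFUL";
            try {
                task.run();
            } catch (RuntimeException e) {
                outcome = "FAILURE";
            }
            listener.afterTaskExecution(taskType, outcome); // always invoked
        }

        public static void main(String[] args) {
            execute(new Listener() {
                @Override public void beforeTaskExecution(String t) { System.out.println("before " + t); }
                @Override public void afterTaskExecution(String t, String o) { System.out.println("after " + t + ": " + o); }
            }, "PROCESS", () -> {});
        }
    }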
    @@ -353,19 +435,19 @@ public class ShardConsumer {
         private synchronized void updateState(TaskOutcome outcome) {
             ConsumerState nextState = currentState;
             switch (outcome) {
    -        case SUCCESSFUL:
    -            nextState = currentState.successTransition();
    -            break;
    -        case END_OF_SHARD:
    -            markForShutdown(ShutdownReason.SHARD_END);
    -            break;
    -        case FAILURE:
    -            nextState = currentState.failureTransition();
    -            break;
    -        default:
    -            log.error("{} : No handler for outcome of {}", streamIdentifier, outcome.name());
    -            nextState = currentState.failureTransition();
    -            break;
    +            case SUCCESSFUL:
    +                nextState = currentState.successTransition();
    +                break;
    +            case END_OF_SHARD:
    +                markForShutdown(ShutdownReason.SHARD_END);
    +                break;
    +            case FAILURE:
    +                nextState = currentState.failureTransition();
    +                break;
    +            default:
    +                log.error("{} : No handler for outcome of {}", streamIdentifier, outcome.name());
    +                nextState = currentState.failureTransition();
    +                break;
             }
     
             nextState = handleShutdownTransition(outcome, nextState);
    @@ -387,9 +469,16 @@ public class ShardConsumer {
                 Exception taskException = taskResult.getException();
                 if (taskException instanceof BlockedOnParentShardException) {
                     // No need to log the stack trace for this exception (it is very specific).
    -                log.debug("{} : Shard {} is blocked on completion of parent shard.", streamIdentifier, shardInfo.shardId());
    +                log.debug(
    +                        "{} : Shard {} is blocked on completion of parent shard.",
    +                        streamIdentifier,
    +                        shardInfo.shardId());
                 } else {
    -                log.debug("{} : Caught exception running {} task: ", streamIdentifier, currentTask.taskType(), taskResult.getException());
    +                log.debug(
    +                        "{} : Caught exception running {} task: ",
    +                        streamIdentifier,
    +                        currentTask.taskType(),
    +                        taskResult.getException());
                 }
             }
         }
    @@ -470,5 +559,4 @@ public class ShardConsumer {
                 }
             };
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java
    index 0518b830..bc1b9d20 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java
    @@ -15,6 +15,8 @@
     
     package software.amazon.kinesis.lifecycle;
     
    +import java.util.concurrent.ExecutorService;
    +
     import lombok.Data;
     import lombok.NonNull;
     import lombok.experimental.Accessors;
    @@ -22,11 +24,11 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi;
     import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
     import software.amazon.kinesis.common.InitialPositionInStreamExtended;
     import software.amazon.kinesis.common.StreamIdentifier;
    +import software.amazon.kinesis.leases.HierarchicalShardSyncer;
     import software.amazon.kinesis.leases.LeaseCleanupManager;
     import software.amazon.kinesis.leases.LeaseCoordinator;
     import software.amazon.kinesis.leases.ShardDetector;
     import software.amazon.kinesis.leases.ShardInfo;
    -import software.amazon.kinesis.leases.HierarchicalShardSyncer;
     import software.amazon.kinesis.metrics.MetricsFactory;
     import software.amazon.kinesis.processor.Checkpointer;
     import software.amazon.kinesis.processor.ShardRecordProcessor;
    @@ -34,28 +36,34 @@ import software.amazon.kinesis.retrieval.AggregatorUtil;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder;
     
    -import java.util.concurrent.ExecutorService;
    -
     @Data
     @Accessors(fluent = true)
     @KinesisClientInternalApi
     public class ShardConsumerArgument {
         @NonNull
         private final ShardInfo shardInfo;
    +
         @NonNull
         private final StreamIdentifier streamIdentifier;
    +
         @NonNull
         private final LeaseCoordinator leaseCoordinator;
    +
         @NonNull
         private final ExecutorService executorService;
    +
         @NonNull
         private final RecordsPublisher recordsPublisher;
    +
         @NonNull
         private final ShardRecordProcessor shardRecordProcessor;
    +
         @NonNull
         private final Checkpointer checkpoint;
    +
         @NonNull
         private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer;
    +
         private final long parentShardPollIntervalMillis;
         private final long taskBackoffTimeMillis;
         private final boolean skipShardSyncAtWorkerInitializationIfLeasesExist;
    @@ -63,16 +71,22 @@ public class ShardConsumerArgument {
         private final int maxListShardsRetryAttempts;
         private final boolean shouldCallProcessRecordsEvenForEmptyRecordList;
         private final long idleTimeInMilliseconds;
    +
         @NonNull
         private final InitialPositionInStreamExtended initialPositionInStream;
    +
         private final boolean cleanupLeasesOfCompletedShards;
         private final boolean ignoreUnexpectedChildShards;
    +
         @NonNull
         private final ShardDetector shardDetector;
    +
         private final AggregatorUtil aggregatorUtil;
         private final HierarchicalShardSyncer hierarchicalShardSyncer;
    +
         @NonNull
         private final MetricsFactory metricsFactory;
    +
         private final LeaseCleanupManager leaseCleanupManager;
         private final SchemaRegistryDecoder schemaRegistryDecoder;
     }
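ShardConsumerArgument is a Lombok value holder: @Data generates the constructor for final fields plus getters, equals/hashCode, and toString, while @Accessors(fluent = true) names each getter after its field (shardInfo() rather than getShardInfo()). A minimal sketch of the same combination; requires the Lombok dependency, and ExampleArgument is illustrative only:

    import lombok.Data;
    import lombok.NonNull;
    import lombok.experimental.Accessors;

    // Sketch of the Lombok pattern used above: @Data on final fields generates
    // a required-args constructor and accessors; fluent = true drops the
    // get-prefix from the generated getters.
    @Data
    @Accessors(fluent = true)
    class ExampleArgument {
        @NonNull
        private final String shardId;         // read back as arg.shardId()

        private final long backoffTimeMillis; // read back as arg.backoffTimeMillis()
    }

    class ExampleArgumentUsage {
        public static void main(String[] args) {
            ExampleArgument arg = new ExampleArgument("shardId-000000000000", 500L);
            System.out.println(arg.shardId() + " / " + arg.backoffTimeMillis());
        }
    }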
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java
    index 3ef9fc1d..b97c0a1f 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java
    @@ -18,9 +18,9 @@ package software.amazon.kinesis.lifecycle;
     import lombok.AllArgsConstructor;
     import org.reactivestreams.Subscriber;
     import software.amazon.kinesis.annotations.KinesisClientInternalApi;
    +import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.RecordsRetrieved;
    -import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     
     @KinesisClientInternalApi
     @AllArgsConstructor
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java
    index c4065049..98dd50de 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java
    @@ -19,7 +19,7 @@ import java.util.concurrent.CountDownLatch;
     import software.amazon.kinesis.annotations.KinesisClientInternalApi;
     import software.amazon.kinesis.leases.Lease;
     import software.amazon.kinesis.leases.LeaseCoordinator;
    -import software.amazon.kinesis.processor.ShutdownNotificationAware;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     /**
      * Contains callbacks for completion of stages in a requested record processor shutdown.
    @@ -38,22 +38,23 @@ public class ShardConsumerShutdownNotification implements ShutdownNotification {
     
         /**
          * Creates a new shutdown request object.
    -     * 
    +     *
          * @param leaseCoordinator
          *            the lease coordinator used to drop leases from once the initial shutdown request is completed.
          * @param lease
          *            the lease that this shutdown request will free once initial shutdown is complete
          * @param notificationCompleteLatch
          *            used to inform the caller once the
    -     *            {@link ShutdownNotificationAware} object has been
    +     *            {@link ShardRecordProcessor} object has been
          *            notified of the shutdown request.
          * @param shutdownCompleteLatch
          *            used to inform the caller once the record processor is fully shutdown
          */
    -    public ShardConsumerShutdownNotification(final LeaseCoordinator leaseCoordinator,
    -                                             final Lease lease,
    -                                             final CountDownLatch notificationCompleteLatch,
    -                                             final CountDownLatch shutdownCompleteLatch) {
    +    public ShardConsumerShutdownNotification(
    +            final LeaseCoordinator leaseCoordinator,
    +            final Lease lease,
    +            final CountDownLatch notificationCompleteLatch,
    +            final CountDownLatch shutdownCompleteLatch) {
             this.leaseCoordinator = leaseCoordinator;
             this.lease = lease;
             this.notificationCompleteLatch = notificationCompleteLatch;
    @@ -85,5 +86,4 @@ public class ShardConsumerShutdownNotification implements ShutdownNotification {
             shutdownCompleteLatch.countDown();
             allNotificationCompleted = true;
         }
    -
     }
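The notification above completes in two stages: one latch is released when the record processor has been told that shutdown was requested, a second once shutdown is fully complete, so a caller can block on either stage. A minimal latch-only sketch of that handshake, with lease handling omitted:

    import java.util.concurrent.CountDownLatch;

    // Sketch of the two-stage handshake: the worker counts down one latch when
    // the processor has been notified of the shutdown request and another once
    // shutdown fully completes; the caller can await each stage in turn.
    public class ShutdownHandshakeSketch {
        private final CountDownLatch notificationCompleteLatch = new CountDownLatch(1);
        private final CountDownLatch shutdownCompleteLatch = new CountDownLatch(1);

        void shutdownNotificationComplete() { notificationCompleteLatch.countDown(); }
        void shutdownComplete() { shutdownCompleteLatch.countDown(); }

        void awaitFullShutdown() throws InterruptedException {
            notificationCompleteLatch.await(); // stage 1: processor was told to shut down
            shutdownCompleteLatch.await();     // stage 2: processor finished shutting down
        }

        public static void main(String[] args) throws InterruptedException {
            ShutdownHandshakeSketch n = new ShutdownHandshakeSketch();
            new Thread(() -> { n.shutdownNotificationComplete(); n.shutdownComplete(); }).start();
            n.awaitFullShutdown();
            System.out.println("shutdown observed");
        }
    }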
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java
    index e8406d92..e61a351c 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java
    @@ -14,6 +14,10 @@
      */
     package software.amazon.kinesis.lifecycle;
     
    +import java.time.Duration;
    +import java.time.Instant;
    +import java.util.concurrent.ExecutorService;
    +
     import com.google.common.annotations.VisibleForTesting;
     import io.reactivex.rxjava3.core.Flowable;
     import io.reactivex.rxjava3.core.Scheduler;
    @@ -29,10 +33,6 @@ import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.RecordsRetrieved;
     import software.amazon.kinesis.retrieval.RetryableRetrievalException;
     
    -import java.time.Duration;
    -import java.time.Instant;
    -import java.util.concurrent.ExecutorService;
    -
     @Slf4j
     @Accessors(fluent = true)
 class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
    @@ -43,6 +43,7 @@ class ShardConsumerSubscriber implements Subscriber {
         private final int readTimeoutsToIgnoreBeforeWarning;
         private final String shardInfoId;
         private volatile int readTimeoutSinceLastRead = 0;
    +
         @VisibleForTesting
         final Object lockObject = new Object();
         // This holds the last time an attempt of request to upstream service was made including the first try to
    @@ -51,21 +52,36 @@ class ShardConsumerSubscriber implements Subscriber {
         private RecordsRetrieved lastAccepted = null;
     
         private Subscription subscription;
    +
         @Getter
         private volatile Instant lastDataArrival;
    +
         @Getter
         private volatile Throwable dispatchFailure;
    +
         @Getter(AccessLevel.PACKAGE)
         private volatile Throwable retrievalFailure;
     
         @Deprecated
    -    ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,
    -                            ShardConsumer shardConsumer) {
    -        this(recordsPublisher,executorService,bufferSize,shardConsumer, LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
    +    ShardConsumerSubscriber(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            int bufferSize,
    +            ShardConsumer shardConsumer) {
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                bufferSize,
    +                shardConsumer,
    +                LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
         }
     
    -    ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,
    -            ShardConsumer shardConsumer, int readTimeoutsToIgnoreBeforeWarning) {
    +    ShardConsumerSubscriber(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            int bufferSize,
    +            ShardConsumer shardConsumer,
    +            int readTimeoutsToIgnoreBeforeWarning) {
             this.recordsPublisher = recordsPublisher;
             this.scheduler = Schedulers.from(executorService);
             this.bufferSize = bufferSize;
    @@ -74,7 +90,6 @@ class ShardConsumerSubscriber implements Subscriber {
             this.shardInfoId = ShardInfo.getLeaseKey(shardConsumer.shardInfo());
         }
     
    -
         void startSubscriptions() {
             synchronized (lockObject) {
                 // Setting the lastRequestTime to allow for health checks to restart subscriptions if they failed to
    @@ -83,7 +98,9 @@ class ShardConsumerSubscriber implements Subscriber {
                 if (lastAccepted != null) {
                     recordsPublisher.restartFrom(lastAccepted);
                 }
    -            Flowable.fromPublisher(recordsPublisher).subscribeOn(scheduler).observeOn(scheduler, true, bufferSize)
    +            Flowable.fromPublisher(recordsPublisher)
    +                    .subscribeOn(scheduler)
    +                    .observeOn(scheduler, true, bufferSize)
                         .subscribe(new ShardConsumerNotifyingSubscriber(this, recordsPublisher));
             }
         }
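startSubscriptions() wires the records pipeline with RxJava: subscribeOn(scheduler) moves upstream request work onto the executor-backed scheduler, and the three-argument observeOn(scheduler, delayError, bufferSize) delays errors and bounds how many items may be buffered ahead of the consumer. A runnable sketch of the same shape (RxJava 3), with Flowable.range standing in for the RecordsPublisher:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import io.reactivex.rxjava3.core.Flowable;
    import io.reactivex.rxjava3.core.Scheduler;
    import io.reactivex.rxjava3.schedulers.Schedulers;

    // Sketch of the bounded pipeline: one executor-backed Scheduler drives both
    // subscription and delivery, and observeOn's bufferSize argument caps the
    // number of in-flight items awaiting the consumer.
    public class BoundedPipelineSketch {
        public static void main(String[] args) {
            ExecutorService executor = Executors.newSingleThreadExecutor();
            Scheduler scheduler = Schedulers.from(executor);
            int bufferSize = 8;

            Flowable.range(1, 32)           // stand-in for Flowable.fromPublisher(recordsPublisher)
                    .subscribeOn(scheduler) // upstream requests run on the executor
                    .observeOn(scheduler, true, bufferSize) // delay errors, bound in-flight items
                    .blockingSubscribe(i -> System.out.println("delivered " + i));

            executor.shutdown();
        }
    }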
    @@ -108,8 +125,8 @@ class ShardConsumerSubscriber implements Subscriber {
             Throwable oldFailure = null;
             if (retrievalFailure != null) {
                 synchronized (lockObject) {
    -                String logMessage = String.format("%s: Failure occurred in retrieval.  Restarting data requests",
    -                        shardInfoId);
    +                String logMessage =
    +                        String.format("%s: Failure occurred in retrieval.  Restarting data requests", shardInfoId);
                     if (retrievalFailure instanceof RetryableRetrievalException) {
                         log.debug(logMessage, retrievalFailure.getCause());
                     } else {
    @@ -131,8 +148,14 @@ class ShardConsumerSubscriber implements Subscriber {
                     Duration timeSinceLastResponse = Duration.between(lastRequestTime, now);
                     if (timeSinceLastResponse.toMillis() > maxTimeBetweenRequests) {
                         log.error(
    +                            // CHECKSTYLE.OFF: LineLength
                                 "{}: Last request was dispatched at {}, but no response as of {} ({}).  Cancelling subscription, and restarting. Last successful request details -- {}",
    -                            shardInfoId, lastRequestTime, now, timeSinceLastResponse, recordsPublisher.getLastSuccessfulRequestDetails());
    +                            // CHECKSTYLE.ON: LineLength
    +                            shardInfoId,
    +                            lastRequestTime,
    +                            now,
    +                            timeSinceLastResponse,
    +                            recordsPublisher.getLastSuccessfulRequestDetails());
                         cancel();
     
                         // Start the subscription again which will update the lastRequestTime as well.
    @@ -155,7 +178,10 @@ class ShardConsumerSubscriber implements Subscriber {
                     lastRequestTime = null;
                 }
                 lastDataArrival = Instant.now();
    -            shardConsumer.handleInput(input.processRecordsInput().toBuilder().cacheExitTime(Instant.now()).build(),
    +            shardConsumer.handleInput(
    +                    input.processRecordsInput().toBuilder()
    +                            .cacheExitTime(Instant.now())
    +                            .build(),
                         subscription);
     
             } catch (Throwable t) {
    @@ -195,23 +221,27 @@ class ShardConsumerSubscriber implements Subscriber {
             log.warn(
                     "{}: onError().  Cancelling subscription, and marking self as failed. KCL will "
                             + "recreate the subscription as necessary to continue processing. Last successful request details -- {}",
    -                shardInfoId, recordsPublisher.getLastSuccessfulRequestDetails(), t);
    +                shardInfoId,
    +                recordsPublisher.getLastSuccessfulRequestDetails(),
    +                t);
         }
     
         protected void logOnErrorReadTimeoutWarning(Throwable t) {
    -        log.warn("{}: onError().  Cancelling subscription, and marking self as failed. KCL will"
    -                + " recreate the subscription as necessary to continue processing. If you"
    -                + " are seeing this warning frequently consider increasing the SDK timeouts"
    -                + " by providing an OverrideConfiguration to the kinesis client. Alternatively you"
    -                + " can configure LifecycleConfig.readTimeoutsToIgnoreBeforeWarning to suppress"
    -                + " intermittent ReadTimeout warnings. Last successful request details -- {}",
    -                shardInfoId, recordsPublisher.getLastSuccessfulRequestDetails(), t);
    +        log.warn(
    +                "{}: onError().  Cancelling subscription, and marking self as failed. KCL will"
    +                        + " recreate the subscription as necessary to continue processing. If you"
    +                        + " are seeing this warning frequently consider increasing the SDK timeouts"
    +                        + " by providing an OverrideConfiguration to the kinesis client. Alternatively you"
    +                        + " can configure LifecycleConfig.readTimeoutsToIgnoreBeforeWarning to suppress"
    +                        + " intermittent ReadTimeout warnings. Last successful request details -- {}",
    +                shardInfoId,
    +                recordsPublisher.getLastSuccessfulRequestDetails(),
    +                t);
         }
     
         @Override
         public void onComplete() {
    -        log.debug("{}: onComplete(): Received onComplete.  Activity should be triggered externally",
    -                shardInfoId);
    +        log.debug("{}: onComplete(): Received onComplete.  Activity should be triggered externally", shardInfoId);
         }
     
         public void cancel() {
    @@ -219,5 +249,4 @@ class ShardConsumerSubscriber implements Subscriber {
                 subscription.cancel();
             }
         }
    -
     }
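healthCheck() acts as a watchdog: under lockObject it compares the time of the last dispatched request against the allowed maximum and, when exceeded, cancels and restarts the subscription (restartFrom(lastAccepted) resumes from the last delivered batch). A minimal sketch of that logic; the restart hook is a placeholder for cancel() plus startSubscriptions():

    import java.time.Duration;
    import java.time.Instant;

    // Sketch of the subscription watchdog: record when a request was last
    // dispatched, and if no response arrives within the allowed window,
    // restart the subscription (which also refreshes lastRequestTime in the
    // real code).
    public class SubscriptionWatchdogSketch {
        private final Object lockObject = new Object();
        private Instant lastRequestTime = Instant.now();

        void onRequestDispatched() {
            synchronized (lockObject) { lastRequestTime = Instant.now(); }
        }

        void healthCheck(long maxTimeBetweenRequestsMillis, Runnable restartSubscription) {
            synchronized (lockObject) {
                if (lastRequestTime == null) return; // a response already arrived; nothing pending
                Duration sinceLastRequest = Duration.between(lastRequestTime, Instant.now());
                if (sinceLastRequest.toMillis() > maxTimeBetweenRequestsMillis) {
                    restartSubscription.run();
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            SubscriptionWatchdogSketch w = new SubscriptionWatchdogSketch();
            w.onRequestDispatched();
            Thread.sleep(20);
            w.healthCheck(10, () -> System.out.println("restarting subscription"));
        }
    }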
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java
    index 5757b32a..3885728f 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java
    @@ -19,8 +19,8 @@ import lombok.EqualsAndHashCode;
     import lombok.Getter;
     import lombok.ToString;
     import lombok.experimental.Accessors;
    -import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     /**
      * Container for the parameters to the IRecordProcessor's
    @@ -50,5 +50,4 @@ public class ShutdownInput {
          * @return The checkpointer object that the record processor should use to checkpoint
          */
         private final RecordProcessorCheckpointer checkpointer;
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java
    index 8b29d4df..5356cd23 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java
    @@ -33,14 +33,16 @@ public class ShutdownNotificationTask implements ConsumerTask {
         private final ShardRecordProcessor shardRecordProcessor;
         private final RecordProcessorCheckpointer recordProcessorCheckpointer;
         private final ShutdownNotification shutdownNotification;
    -//    TODO: remove if not used
    +    //    TODO: remove if not used
         private final ShardInfo shardInfo;
     
         @Override
         public TaskResult call() {
             try {
                 try {
    -                shardRecordProcessor.shutdownRequested(ShutdownRequestedInput.builder().checkpointer(recordProcessorCheckpointer).build());
    +                shardRecordProcessor.shutdownRequested(ShutdownRequestedInput.builder()
    +                        .checkpointer(recordProcessorCheckpointer)
    +                        .build());
                 } catch (Exception ex) {
                     return new TaskResult(ex);
                 }
    @@ -55,5 +57,4 @@ public class ShutdownNotificationTask implements ConsumerTask {
         public TaskType taskType() {
             return TaskType.SHUTDOWN_NOTIFICATION;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java
    index 4a07ed7d..0be5a706 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java
    @@ -21,7 +21,6 @@ import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState;
     
    -
     /**
      * Reason the ShardRecordProcessor is being shutdown.
      * Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
    @@ -55,6 +54,7 @@ public enum ShutdownReason {
         REQUESTED(1, ShardConsumerState.SHUTDOWN_REQUESTED.consumerState());
     
         private final int rank;
    +
         @Getter(AccessLevel.PACKAGE)
         @Accessors(fluent = true)
         private final ConsumerState shutdownState;
    @@ -66,7 +66,7 @@ public enum ShutdownReason {
     
         /**
          * Indicates whether the given reason can override the current reason.
    -     * 
    +     *
          * @param reason the reason to transition to
          * @return true if the transition is allowed, false if it's not.
          */
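Each ShutdownReason carries an integer rank, and canTransitionTo decides whether a new reason may override the current one. The method body falls outside this hunk, so the sketch below shows one plausible rank-based rule rather than the verified implementation:

    // Sketch of a rank-ordered shutdown reason: each reason has a rank, and a
    // transition is allowed only toward a higher-ranked (more terminal) reason.
    // The exact comparison used by ShutdownReason.canTransitionTo() is not
    // shown in this hunk; treat this rule and these ranks as illustrative.
    public class RankedReasonSketch {
        enum Reason {
            LEASE_LOST(0), REQUESTED(1), SHARD_END(2);

            private final int rank;
            Reason(int rank) { this.rank = rank; }

            boolean canTransitionTo(Reason next) {
                return next.rank > this.rank; // only move toward a higher-ranked reason
            }
        }

        public static void main(String[] args) {
            System.out.println(Reason.REQUESTED.canTransitionTo(Reason.SHARD_END)); // true
            System.out.println(Reason.SHARD_END.canTransitionTo(Reason.REQUESTED)); // false
        }
    }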
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java
    index c436f38a..4059719f 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java
    @@ -14,12 +14,14 @@
      */
     package software.amazon.kinesis.lifecycle;
     
    -import com.google.common.annotations.VisibleForTesting;
    -
     import java.util.List;
     import java.util.Objects;
     import java.util.Optional;
    +import java.util.Random;
    +import java.util.Set;
    +import java.util.stream.Collectors;
     
    +import com.google.common.annotations.VisibleForTesting;
     import lombok.NonNull;
     import lombok.RequiredArgsConstructor;
     import lombok.extern.slf4j.Slf4j;
    @@ -34,13 +36,14 @@ import software.amazon.kinesis.leases.HierarchicalShardSyncer;
     import software.amazon.kinesis.leases.Lease;
     import software.amazon.kinesis.leases.LeaseCleanupManager;
     import software.amazon.kinesis.leases.LeaseCoordinator;
    +import software.amazon.kinesis.leases.LeaseRefresher;
     import software.amazon.kinesis.leases.ShardDetector;
     import software.amazon.kinesis.leases.ShardInfo;
     import software.amazon.kinesis.leases.UpdateField;
     import software.amazon.kinesis.leases.exceptions.CustomerApplicationException;
     import software.amazon.kinesis.leases.exceptions.DependencyException;
    -import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion;
     import software.amazon.kinesis.leases.exceptions.InvalidStateException;
    +import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion;
     import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
     import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
     import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
    @@ -52,11 +55,6 @@ import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
     
    -import java.util.Random;
    -import java.util.Set;
    -import java.util.function.Function;
    -import java.util.stream.Collectors;
    -
     /**
      * Task for invoking the ShardRecordProcessor shutdown() callback.
      */
    @@ -66,43 +64,63 @@ import java.util.stream.Collectors;
     public class ShutdownTask implements ConsumerTask {
         private static final String SHUTDOWN_TASK_OPERATION = "ShutdownTask";
         private static final String RECORD_PROCESSOR_SHUTDOWN_METRIC = "RecordProcessor.shutdown";
    +
    +    /**
    +     * Reusable, immutable {@link LeaseLostInput}.
    +     */
    +    private static final LeaseLostInput LEASE_LOST_INPUT =
    +            LeaseLostInput.builder().build();
    +
    +    private static final Random RANDOM = new Random();
    +
         @VisibleForTesting
         static final int RETRY_RANDOM_MAX_RANGE = 30;
     
         @NonNull
         private final ShardInfo shardInfo;
    +
         @NonNull
         private final ShardDetector shardDetector;
    +
         @NonNull
         private final ShardRecordProcessor shardRecordProcessor;
    +
         @NonNull
         private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer;
    +
         @NonNull
         private final ShutdownReason reason;
    +
         @NonNull
         private final InitialPositionInStreamExtended initialPositionInStream;
    +
         private final boolean cleanupLeasesOfCompletedShards;
         private final boolean ignoreUnexpectedChildShards;
    +
         @NonNull
         private final LeaseCoordinator leaseCoordinator;
    +
         private final long backoffTimeMillis;
    +
         @NonNull
         private final RecordsPublisher recordsPublisher;
    +
         @NonNull
         private final HierarchicalShardSyncer hierarchicalShardSyncer;
    +
         @NonNull
         private final MetricsFactory metricsFactory;
     
         private final TaskType taskType = TaskType.SHUTDOWN;
     
     private final List<ChildShard> childShards;
    +
         @NonNull
         private final StreamIdentifier streamIdentifier;
    +
         @NonNull
         private final LeaseCleanupManager leaseCleanupManager;
     
-    private static final Function<ShardInfo, String> leaseKeyProvider = shardInfo -> ShardInfo.getLeaseKey(shardInfo);
    -
         /*
          * Invokes ShardRecordProcessor shutdown() API.
          * (non-Javadoc)
    @@ -114,82 +132,91 @@ public class ShutdownTask implements ConsumerTask {
             recordProcessorCheckpointer.checkpointer().operation(SHUTDOWN_TASK_OPERATION);
             final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, SHUTDOWN_TASK_OPERATION);
     
    -        Exception exception;
    -
    +        final String leaseKey = ShardInfo.getLeaseKey(shardInfo);
             try {
                 try {
    -                log.debug("Invoking shutdown() for shard {} with childShards {}, concurrencyToken {}. Shutdown reason: {}",
    -                        leaseKeyProvider.apply(shardInfo), childShards, shardInfo.concurrencyToken(), reason);
    +                log.debug(
    +                        "Invoking shutdown() for shard {} with childShards {}, concurrencyToken {}. Shutdown reason: {}",
    +                        leaseKey,
    +                        childShards,
    +                        shardInfo.concurrencyToken(),
    +                        reason);
     
                     final long startTime = System.currentTimeMillis();
    -                final Lease currentShardLease = leaseCoordinator.getCurrentlyHeldLease(leaseKeyProvider.apply(shardInfo));
    -                final Runnable leaseLostAction = () -> shardRecordProcessor.leaseLost(LeaseLostInput.builder().build());
    +                final Lease currentShardLease = leaseCoordinator.getCurrentlyHeldLease(leaseKey);
    +                final Runnable leaseLostAction = () -> shardRecordProcessor.leaseLost(LEASE_LOST_INPUT);
     
                     if (reason == ShutdownReason.SHARD_END) {
                         try {
    -                        takeShardEndAction(currentShardLease, scope, startTime);
    +                        takeShardEndAction(currentShardLease, leaseKey, scope, startTime);
                         } catch (InvalidStateException e) {
                             // If InvalidStateException happens, it indicates we have a non recoverable error in short term.
    -                        // In this scenario, we should shutdown the shardConsumer with LEASE_LOST reason to allow other worker to take the lease and retry shutting down.
    -                        log.warn("Lease {}: Invalid state encountered while shutting down shardConsumer with SHARD_END reason. " +
    -                                "Dropping the lease and shutting down shardConsumer using LEASE_LOST reason. ", leaseKeyProvider.apply(shardInfo), e);
    -                        dropLease(currentShardLease);
    -                        throwOnApplicationException(leaseLostAction, scope, startTime);
+                        // In this scenario, we should shut down the ShardConsumer with the LEASE_LOST reason to
+                        // allow another worker to take the lease and retry shutting down.
    +                        log.warn(
    +                                "Lease {}: Invalid state encountered while shutting down shardConsumer with SHARD_END reason. "
    +                                        + "Dropping the lease and shutting down shardConsumer using LEASE_LOST reason.",
    +                                leaseKey,
    +                                e);
    +                        dropLease(currentShardLease, leaseKey);
    +                        throwOnApplicationException(leaseKey, leaseLostAction, scope, startTime);
                         }
                     } else {
    -                    throwOnApplicationException(leaseLostAction, scope, startTime);
    +                    throwOnApplicationException(leaseKey, leaseLostAction, scope, startTime);
                     }
     
    -                log.debug("Shutting down retrieval strategy for shard {}.", leaseKeyProvider.apply(shardInfo));
    +                log.debug("Shutting down retrieval strategy for shard {}.", leaseKey);
                     recordsPublisher.shutdown();
    -                log.debug("Record processor completed shutdown() for shard {}", leaseKeyProvider.apply(shardInfo));
    +
    +                log.debug("Record processor completed shutdown() for shard {}", leaseKey);
     
                     return new TaskResult(null);
                 } catch (Exception e) {
                     if (e instanceof CustomerApplicationException) {
    -                    log.error("Shard {}: Application exception. ", leaseKeyProvider.apply(shardInfo), e);
    +                    log.error("Shard {}: Application exception.", leaseKey, e);
                     } else {
    -                    log.error("Shard {}: Caught exception: ", leaseKeyProvider.apply(shardInfo), e);
    +                    log.error("Shard {}: Caught exception:", leaseKey, e);
                     }
    -                exception = e;
                     // backoff if we encounter an exception.
                     try {
                         Thread.sleep(this.backoffTimeMillis);
                     } catch (InterruptedException ie) {
    -                    log.debug("Shard {}: Interrupted sleep", leaseKeyProvider.apply(shardInfo), ie);
    +                    log.debug("Shard {}: Interrupted sleep", leaseKey, ie);
                     }
    +
    +                return new TaskResult(e);
                 }
             } finally {
                 MetricsUtil.endScope(scope);
             }
    -
    -        return new TaskResult(exception);
         }
     
     // Involves persisting child shard info, attempting to checkpoint, and enqueueing the lease for cleanup.
    -    private void takeShardEndAction(Lease currentShardLease,
    -            MetricsScope scope, long startTime)
    +    private void takeShardEndAction(Lease currentShardLease, final String leaseKey, MetricsScope scope, long startTime)
                 throws DependencyException, ProvisionedThroughputException, InvalidStateException,
    -            CustomerApplicationException {
    +                    CustomerApplicationException {
             // Create new lease for the child shards if they don't exist.
    -        // We have one valid scenario that shutdown task got created with SHARD_END reason and an empty list of childShards.
    -        // This would happen when KinesisDataFetcher(for polling mode) or FanOutRecordsPublisher(for StoS mode) catches ResourceNotFound exception.
    -        // In this case, KinesisDataFetcher and FanOutRecordsPublisher will send out SHARD_END signal to trigger a shutdown task with empty list of childShards.
    +        // There is one valid scenario in which a shutdown task is created with the SHARD_END reason and an empty
    +        // list of childShards.
    +        // This happens when KinesisDataFetcher (for polling mode) or FanOutRecordsPublisher (for StoS mode)
    +        // catches a ResourceNotFound exception.
    +        // In that case, KinesisDataFetcher or FanOutRecordsPublisher sends out a SHARD_END signal to trigger a
    +        // shutdown task with an empty list of childShards.
             // This scenario could happen when customer deletes the stream while leaving the KCL application running.
             if (currentShardLease == null) {
    -            throw new InvalidStateException(leaseKeyProvider.apply(shardInfo)
    -                    + " : Lease not owned by the current worker. Leaving ShardEnd handling to new owner.");
    +            throw new InvalidStateException(
    +                    leaseKey + " : Lease not owned by the current worker. Leaving ShardEnd handling to new owner.");
             }
             if (!CollectionUtils.isNullOrEmpty(childShards)) {
    -            createLeasesForChildShardsIfNotExist();
    +            createLeasesForChildShardsIfNotExist(scope);
                 updateLeaseWithChildShards(currentShardLease);
             }
    -        final LeasePendingDeletion leasePendingDeletion = new LeasePendingDeletion(streamIdentifier, currentShardLease,
    -                shardInfo, shardDetector);
    +        final LeasePendingDeletion leasePendingDeletion =
    +                new LeasePendingDeletion(streamIdentifier, currentShardLease, shardInfo, shardDetector);
             if (!leaseCleanupManager.isEnqueuedForDeletion(leasePendingDeletion)) {
                 boolean isSuccess = false;
                 try {
    -                isSuccess = attemptShardEndCheckpointing(scope, startTime);
    +                isSuccess = attemptShardEndCheckpointing(leaseKey, scope, startTime);
                 } finally {
                     // Check if either the shard end ddb persist is successful or
                     // if childshards is empty. When child shards is empty then either it is due to
    @@ -202,79 +229,117 @@ public class ShutdownTask implements ConsumerTask {
             }
         }
     
    -    private boolean attemptShardEndCheckpointing(MetricsScope scope, long startTime)
    +    private boolean attemptShardEndCheckpointing(final String leaseKey, MetricsScope scope, long startTime)
                 throws DependencyException, ProvisionedThroughputException, InvalidStateException,
    -            CustomerApplicationException {
    -        final Lease leaseFromDdb = Optional.ofNullable(leaseCoordinator.leaseRefresher().getLease(leaseKeyProvider.apply(shardInfo)))
    -                .orElseThrow(() -> new InvalidStateException("Lease for shard " + leaseKeyProvider.apply(shardInfo) + " does not exist."));
    +                    CustomerApplicationException {
    +        final Lease leaseFromDdb = Optional.ofNullable(
    +                        leaseCoordinator.leaseRefresher().getLease(leaseKey))
    +                .orElseThrow(() -> new InvalidStateException("Lease for shard " + leaseKey + " does not exist."));
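    +        // If the lease already records SHARD_END, a prior attempt completed this checkpoint; skip the customer callback.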
             if (!leaseFromDdb.checkpoint().equals(ExtendedSequenceNumber.SHARD_END)) {
                 // Call the shardRecordsProcessor to checkpoint with SHARD_END sequence number.
    -            // The shardEnded is implemented by customer. We should validate if the SHARD_END checkpointing is successful after calling shardEnded.
    -            throwOnApplicationException(() -> applicationCheckpointAndVerification(), scope, startTime);
    +            // shardEnded is implemented by the customer. We should validate that the SHARD_END checkpointing
    +            // succeeded after calling shardEnded.
    +            throwOnApplicationException(
    +                    leaseKey, () -> applicationCheckpointAndVerification(leaseKey), scope, startTime);
             }
             return true;
         }
     
    -    private void applicationCheckpointAndVerification() {
    -        recordProcessorCheckpointer
    -                .sequenceNumberAtShardEnd(recordProcessorCheckpointer.largestPermittedCheckpointValue());
    +    private void applicationCheckpointAndVerification(final String leaseKey) {
    +        recordProcessorCheckpointer.sequenceNumberAtShardEnd(
    +                recordProcessorCheckpointer.largestPermittedCheckpointValue());
             recordProcessorCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END);
    -        shardRecordProcessor.shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build());
    +        shardRecordProcessor.shardEnded(ShardEndedInput.builder()
    +                .checkpointer(recordProcessorCheckpointer)
    +                .build());
             final ExtendedSequenceNumber lastCheckpointValue = recordProcessorCheckpointer.lastCheckpointValue();
    -        if (lastCheckpointValue == null
    -                || !lastCheckpointValue.equals(ExtendedSequenceNumber.SHARD_END)) {
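    +        // SHARD_END.equals(null) is false, so this single constant-first check covers both the missing-checkpoint
    +        // and wrong-checkpoint cases.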
    +        if (!ExtendedSequenceNumber.SHARD_END.equals(lastCheckpointValue)) {
                 throw new IllegalArgumentException("Application didn't checkpoint at end of shard "
    -                    + leaseKeyProvider.apply(shardInfo) + ". Application must checkpoint upon shard end. " +
    -                    "See ShardRecordProcessor.shardEnded javadocs for more information.");
    +                    + leaseKey + ". Application must checkpoint upon shard end. "
    +                    + "See ShardRecordProcessor.shardEnded javadocs for more information.");
             }
         }
     
    -    private void throwOnApplicationException(Runnable action, MetricsScope metricsScope, final long startTime) throws CustomerApplicationException {
    +    private void throwOnApplicationException(
    +            final String leaseKey, Runnable action, MetricsScope metricsScope, final long startTime)
    +            throws CustomerApplicationException {
             try {
                 action.run();
             } catch (Exception e) {
    -            throw new CustomerApplicationException("Customer application throws exception for shard " + leaseKeyProvider.apply(shardInfo) +": ", e);
    +            throw new CustomerApplicationException(
    +                    "Customer application throws exception for shard " + leaseKey + ": ", e);
             } finally {
                 MetricsUtil.addLatency(metricsScope, RECORD_PROCESSOR_SHUTDOWN_METRIC, startTime, MetricsLevel.SUMMARY);
             }
         }
     
    -    private void createLeasesForChildShardsIfNotExist()
    +    private void createLeasesForChildShardsIfNotExist(MetricsScope scope)
                 throws DependencyException, InvalidStateException, ProvisionedThroughputException {
    +        final LeaseRefresher leaseRefresher = leaseCoordinator.leaseRefresher();
    +
             // For child shard resulted from merge of two parent shards, verify if both the parents are either present or
             // not present in the lease table before creating the lease entry.
    -        if (!CollectionUtils.isNullOrEmpty(childShards) && childShards.size() == 1) {
    +        if (childShards.size() == 1) {
                 final ChildShard childShard = childShards.get(0);
                 final List<String> parentLeaseKeys = childShard.parentShards().stream()
    -                    .map(parentShardId -> ShardInfo.getLeaseKey(shardInfo, parentShardId)).collect(Collectors.toList());
    +                    .map(parentShardId -> ShardInfo.getLeaseKey(shardInfo, parentShardId))
    +                    .collect(Collectors.toList());
                 if (parentLeaseKeys.size() != 2) {
    +                MetricsUtil.addCount(scope, "MissingMergeParent", 1, MetricsLevel.SUMMARY);
                     throw new InvalidStateException("Shard " + shardInfo.shardId() + "'s only child shard " + childShard
                             + " does not contain other parent information.");
    -            } else {
    -                boolean isValidLeaseTableState =
    -                        Objects.isNull(leaseCoordinator.leaseRefresher().getLease(parentLeaseKeys.get(0))) == Objects
    -                                .isNull(leaseCoordinator.leaseRefresher().getLease(parentLeaseKeys.get(1)));
    -                if (!isValidLeaseTableState) {
    -                    if (!isOneInNProbability(RETRY_RANDOM_MAX_RANGE)) {
    -                        throw new BlockedOnParentShardException(
    -                                "Shard " + shardInfo.shardId() + "'s only child shard " + childShard
    -                                        + " has partial parent information in lease table. Hence deferring lease creation of child shard.");
    -                    } else {
    -                        throw new InvalidStateException(
    -                                "Shard " + shardInfo.shardId() + "'s only child shard " + childShard
    -                                        + " has partial parent information in lease table. Hence deferring lease creation of child shard.");
    -                    }
    +            }
    +
    +            final Lease parentLease0 = leaseRefresher.getLease(parentLeaseKeys.get(0));
    +            final Lease parentLease1 = leaseRefresher.getLease(parentLeaseKeys.get(1));
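    +            // Comparing the two null-checks with != acts as XOR: true exactly when one parent lease exists and the
    +            // other does not.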
    +            if (Objects.isNull(parentLease0) != Objects.isNull(parentLease1)) {
    +                MetricsUtil.addCount(scope, "MissingMergeParentLease", 1, MetricsLevel.SUMMARY);
    +                final String message = "Shard " + shardInfo.shardId() + "'s only child shard " + childShard
    +                        + " has partial parent information in lease table: [parent0="
    +                        + parentLease0 + ", parent1="
    +                        + parentLease1 + "]. Hence deferring lease creation of child shard.";
    +                if (isOneInNProbability(RETRY_RANDOM_MAX_RANGE)) {
    +                    // abort further attempts and drop the lease; lease will
    +                    // be reassigned
    +                    throw new InvalidStateException(message);
    +                } else {
    +                    // initiate a Thread.sleep(...) and keep the lease;
    +                    // keeping the lease decreases churn of lease reassignments
    +                    throw new BlockedOnParentShardException(message);
                     }
                 }
             }
    -        for(ChildShard childShard : childShards) {
    -            final String leaseKey = ShardInfo.getLeaseKey(shardInfo, childShard.shardId());
    -            if(leaseCoordinator.leaseRefresher().getLease(leaseKey) == null) {
    -                log.debug("{} - Shard {} - Attempting to create lease for child shard {}", shardDetector.streamIdentifier(), shardInfo.shardId(), leaseKey);
    -                final Lease leaseToCreate = hierarchicalShardSyncer.createLeaseForChildShard(childShard, shardDetector.streamIdentifier());
    -                leaseCoordinator.leaseRefresher().createLeaseIfNotExists(leaseToCreate);
     
    -                log.info("{} - Shard {}: Created child shard lease: {}", shardDetector.streamIdentifier(), shardInfo.shardId(), leaseToCreate);
    +        for (ChildShard childShard : childShards) {
    +            final String leaseKey = ShardInfo.getLeaseKey(shardInfo, childShard.shardId());
    +            if (leaseRefresher.getLease(leaseKey) == null) {
    +                log.debug(
    +                        "{} - Shard {} - Attempting to create lease for child shard {}",
    +                        shardDetector.streamIdentifier(),
    +                        shardInfo.shardId(),
    +                        leaseKey);
    +                final Lease leaseToCreate =
    +                        hierarchicalShardSyncer.createLeaseForChildShard(childShard, shardDetector.streamIdentifier());
    +                final long startTime = System.currentTimeMillis();
    +                boolean success = false;
    +                try {
    +                    leaseRefresher.createLeaseIfNotExists(leaseToCreate);
    +                    success = true;
    +                } finally {
    +                    MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED);
    +                    if (leaseToCreate.checkpoint() != null) {
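    +                        // Sentinel checkpoints (e.g. TRIM_HORIZON, LATEST) are reported under their own name;
    +                        // concrete sequence numbers collapse into one "SEQUENCE_NUMBER" metric so names stay bounded.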
    +                        final String metricName = leaseToCreate.checkpoint().isSentinelCheckpoint()
    +                                ? leaseToCreate.checkpoint().sequenceNumber()
    +                                : "SEQUENCE_NUMBER";
    +                        MetricsUtil.addSuccess(scope, "CreateLease_" + metricName, true, MetricsLevel.DETAILED);
    +                    }
    +                }
    +
    +                log.info(
    +                        "{} - Shard {}: Created child shard lease: {}",
    +                        shardDetector.streamIdentifier(),
    +                        shardInfo.shardId(),
    +                        leaseToCreate);
                 }
             }
         }
    @@ -284,18 +349,22 @@ public class ShutdownTask implements ConsumerTask {
          */
         @VisibleForTesting
         boolean isOneInNProbability(int n) {
    -        Random r = new Random();
    -        return 1 == r.nextInt((n - 1) + 1) + 1;
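    +        // RANDOM.nextInt(n) is uniform over [0, n), so this returns true with probability exactly 1/n.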
    +        return 0 == RANDOM.nextInt(n);
         }
     
         private void updateLeaseWithChildShards(Lease currentLease)
                 throws DependencyException, InvalidStateException, ProvisionedThroughputException {
    -        Set<String> childShardIds = childShards.stream().map(ChildShard::shardId).collect(Collectors.toSet());
    +        Set<String> childShardIds =
    +                childShards.stream().map(ChildShard::shardId).collect(Collectors.toSet());
     
             final Lease updatedLease = currentLease.copy();
             updatedLease.childShardIds(childShardIds);
             leaseCoordinator.leaseRefresher().updateLeaseWithMetaInfo(updatedLease, UpdateField.CHILD_SHARDS);
    -        log.info("Shard {}: Updated current lease {} with child shard information: {}", shardInfo.shardId(), currentLease.leaseKey(), childShardIds);
    +        log.info(
    +                "Shard {}: Updated current lease {} with child shard information: {}",
    +                shardInfo.shardId(),
    +                currentLease.leaseKey(),
    +                childShardIds);
         }
     
         /*
    @@ -313,10 +382,11 @@ public class ShutdownTask implements ConsumerTask {
             return reason;
         }
     
    -    private void dropLease(Lease currentLease) {
    +    private void dropLease(Lease currentLease, final String leaseKey) {
             if (currentLease == null) {
    -            log.warn("Shard {}: Unable to find the lease for shard. Will shutdown the shardConsumer directly.", leaseKeyProvider.apply(shardInfo));
    -            return;
    +            log.warn(
    +                    "Shard {}: Unable to find the lease for shard. Will shutdown the shardConsumer directly.",
    +                    leaseKey);
             } else {
                 leaseCoordinator.dropLease(currentLease);
                 log.info("Dropped lease for shutting down ShardConsumer: " + currentLease.leaseKey());
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java
    index b061faa4..a60ed071 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java
    @@ -69,5 +69,4 @@ public class TaskResult {
             this.exception = e;
             this.shardEndReached = isShardEndReached;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java
    index 877b0a80..8981a0c0 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java
    @@ -33,5 +33,4 @@ import software.amazon.kinesis.processor.ShardRecordProcessor;
     @Builder
     @EqualsAndHashCode
     @ToString
    -public class LeaseLostInput {
    -}
    +public class LeaseLostInput {}
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java
    index 1ce9239b..8cbb9bac 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java
    @@ -24,8 +24,8 @@ import lombok.Getter;
     import lombok.ToString;
     import lombok.experimental.Accessors;
     import software.amazon.awssdk.services.kinesis.model.ChildShard;
    -import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.retrieval.KinesisClientRecord;
     
     /**
    @@ -76,7 +76,7 @@ public class ProcessRecordsInput {
     
         /**
          * How long the records spent waiting to be dispatched to the {@link ShardRecordProcessor}
    -     * 
    +     *
          * @return the amount of time that records spent waiting before processing.
          */
         public Duration timeSpentInCache() {
    @@ -85,5 +85,4 @@ public class ProcessRecordsInput {
             }
             return Duration.between(cacheEntryTime, cacheExitTime);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java
    index 407c9880..5ae07a6e 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java
    @@ -20,8 +20,8 @@ import lombok.EqualsAndHashCode;
     import lombok.Getter;
     import lombok.ToString;
     import lombok.experimental.Accessors;
    -import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     /**
      * Provides a checkpointer that must be used to signal the completion of the shard to the Scheduler.
    @@ -41,5 +41,4 @@ public class ShardEndedInput {
          * shutdown until a successful checkpoint occurs.
          */
         private final RecordProcessorCheckpointer checkpointer;
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java
    index 4c01fde3..7ef8f674 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java
    @@ -18,9 +18,9 @@ import lombok.Builder;
     import lombok.Data;
     import lombok.experimental.Accessors;
     import software.amazon.kinesis.leases.ShardInfo;
    +import software.amazon.kinesis.lifecycle.TaskExecutionListener;
     import software.amazon.kinesis.lifecycle.TaskOutcome;
     import software.amazon.kinesis.lifecycle.TaskType;
    -import software.amazon.kinesis.lifecycle.TaskExecutionListener;
     
     /**
      * Container for the parameters to the TaskExecutionListener's
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java
    index a293521e..226064d1 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java
    @@ -18,12 +18,10 @@ package software.amazon.kinesis.metrics;
      * This is a MetricScope with a KeyType of String. It provides the implementation of
      * getting the key based off of the String KeyType.
      */
    -
     public abstract class AccumulateByNameMetricsScope extends AccumulatingMetricsScope<String> {
     
         @Override
         protected String getKey(String name) {
             return name;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java
    index 31891ecf..5ff88525 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java
    @@ -25,10 +25,10 @@ import software.amazon.awssdk.services.cloudwatch.model.StatisticSet;
      * An IMetricsScope that accumulates data from multiple calls to addData with
      * the same name parameter. It tracks min, max, sample count, and sum for each
      * named metric.
    - * 
    + *
      * @param <KeyType> can be a class or object defined by the user that stores information about a MetricDatum needed
      *        by the user.
    - * 
    + *
      *        The following is a example of what a KeyType class might look like:
      *        class SampleKeyType {
      *              private long timeKeyCreated;
    @@ -61,9 +61,9 @@ public abstract class AccumulatingMetricsScope<KeyType> extends EndingMetricsSco
         protected abstract KeyType getKey(String name);
     
         /**
    -     * Adds data points to an IMetricsScope. Multiple calls to IMetricsScopes that have the 
    +     * Adds data points to an IMetricsScope. Multiple calls to IMetricsScopes that have the
          * same key will have their data accumulated.
    -     * 
    +     *
          * @param key
          *        data point key
          * @param name
    @@ -79,9 +79,15 @@ public abstract class AccumulatingMetricsScope<KeyType> extends EndingMetricsSco
             final MetricDatum datum = data.get(key);
             final MetricDatum metricDatum;
             if (datum == null) {
    -            metricDatum = MetricDatum.builder().metricName(name).unit(unit)
    -                    .statisticValues(
    -                            StatisticSet.builder().maximum(value).minimum(value).sampleCount(1.0).sum(value).build())
    +            metricDatum = MetricDatum.builder()
    +                    .metricName(name)
    +                    .unit(unit)
    +                    .statisticValues(StatisticSet.builder()
    +                            .maximum(value)
    +                            .minimum(value)
    +                            .sampleCount(1.0)
    +                            .sum(value)
    +                            .build())
                         .build();
             } else {
                 if (!datum.unit().equals(unit)) {
    @@ -91,8 +97,10 @@ public abstract class AccumulatingMetricsScope extends EndingMetricsSco
                 final StatisticSet oldStatisticSet = datum.statisticValues();
                 final StatisticSet statisticSet = oldStatisticSet.toBuilder()
                         .maximum(Math.max(value, oldStatisticSet.maximum()))
    -                    .minimum(Math.min(value, oldStatisticSet.minimum())).sampleCount(oldStatisticSet.sampleCount() + 1)
    -                    .sum(oldStatisticSet.sum() + value).build();
    +                    .minimum(Math.min(value, oldStatisticSet.minimum()))
    +                    .sampleCount(oldStatisticSet.sampleCount() + 1)
    +                    .sum(oldStatisticSet.sum() + value)
    +                    .build();
     
                 metricDatum = datum.toBuilder().statisticValues(statisticSet).build();
             }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java
    index 4b04cad7..21fc919a 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java
    @@ -20,10 +20,8 @@ import java.util.Objects;
     import software.amazon.awssdk.services.cloudwatch.model.Dimension;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     
    -
    -
    -/*
    - * A representation of a key of a MetricDatum. This class is useful when wanting to compare 
    +/**
    + * A representation of a key of a MetricDatum. This class is useful when wanting to compare
      * whether 2 keys have the same MetricDatum. This feature will be used in MetricAccumulatingQueue
      * where we aggregate metrics across multiple MetricScopes.
      */
    @@ -31,11 +29,10 @@ public class CloudWatchMetricKey {
     
         private List<Dimension> dimensions;
         private String metricName;
    -    
    +
         /**
          * @param datum data point
          */
    -
         public CloudWatchMetricKey(MetricDatum datum) {
             this.dimensions = datum.dimensions();
             this.metricName = datum.metricName();
    @@ -48,14 +45,16 @@ public class CloudWatchMetricKey {
     
         @Override
         public boolean equals(Object obj) {
    -        if (this == obj)
    +        if (this == obj) {
                 return true;
    -        if (obj == null)
    +        }
    +        if (obj == null) {
                 return false;
    -        if (getClass() != obj.getClass())
    +        }
    +        if (getClass() != obj.getClass()) {
                 return false;
    +        }
             CloudWatchMetricKey other = (CloudWatchMetricKey) obj;
             return Objects.equals(other.dimensions, dimensions) && Objects.equals(other.metricName, metricName);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java
    index 59e5cac1..11d77359 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java
    @@ -17,7 +17,6 @@ package software.amazon.kinesis.metrics;
     import java.util.Set;
     
     import com.google.common.collect.ImmutableSet;
    -
     import lombok.NonNull;
     import software.amazon.awssdk.core.exception.AbortedException;
     import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
    @@ -33,6 +32,7 @@ public class CloudWatchMetricsFactory implements MetricsFactory {
          * immediately instead of waiting for the next scheduled call.
          */
         private final CloudWatchPublisherRunnable runnable;
    +
         private final Thread publicationThread;
     
         /**
    @@ -62,16 +62,20 @@ public class CloudWatchMetricsFactory implements MetricsFactory {
          * @param flushSize
          *            size of batch that can be published
          */
    -    public CloudWatchMetricsFactory(@NonNull final CloudWatchAsyncClient cloudWatchClient,
    -            @NonNull final String namespace, final long bufferTimeMillis, final int maxQueueSize,
    -            @NonNull final MetricsLevel metricsLevel, @NonNull final Set<String> metricsEnabledDimensions,
    +    public CloudWatchMetricsFactory(
    +            @NonNull final CloudWatchAsyncClient cloudWatchClient,
    +            @NonNull final String namespace,
    +            final long bufferTimeMillis,
    +            final int maxQueueSize,
    +            @NonNull final MetricsLevel metricsLevel,
    +            @NonNull final Set<String> metricsEnabledDimensions,
                 final int flushSize) {
             this.metricsLevel = metricsLevel;
    -        this.metricsEnabledDimensions = (metricsEnabledDimensions == null ? ImmutableSet.of()
    -                : ImmutableSet.copyOf(metricsEnabledDimensions));
    +        this.metricsEnabledDimensions =
    +                (metricsEnabledDimensions == null ? ImmutableSet.of() : ImmutableSet.copyOf(metricsEnabledDimensions));
     
    -        runnable = new CloudWatchPublisherRunnable(new CloudWatchMetricsPublisher(cloudWatchClient, namespace),
    -                bufferTimeMillis, maxQueueSize, flushSize);
    +        runnable = new CloudWatchPublisherRunnable(
    +                new CloudWatchMetricsPublisher(cloudWatchClient, namespace), bufferTimeMillis, maxQueueSize, flushSize);
             publicationThread = new Thread(runnable);
             publicationThread.setName("cw-metrics-publisher");
             publicationThread.start();
    @@ -90,5 +94,4 @@ public class CloudWatchMetricsFactory implements MetricsFactory {
                 throw AbortedException.builder().message(e.getMessage()).cause(e).build();
             }
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java
    index 24a6e464..82ad1876 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java
    @@ -14,6 +14,12 @@
      */
     package software.amazon.kinesis.metrics;
     
    +import java.util.ArrayList;
    +import java.util.List;
    +import java.util.concurrent.CompletableFuture;
    +import java.util.concurrent.ExecutionException;
    +import java.util.concurrent.TimeoutException;
    +
     import lombok.extern.slf4j.Slf4j;
     import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
     import software.amazon.awssdk.services.cloudwatch.model.CloudWatchException;
    @@ -21,12 +27,6 @@ import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest;
     import software.amazon.kinesis.retrieval.AWSExceptionManager;
     
    -import java.util.ArrayList;
    -import java.util.List;
    -import java.util.concurrent.CompletableFuture;
    -import java.util.concurrent.ExecutionException;
    -import java.util.concurrent.TimeoutException;
    -
     import static java.util.concurrent.TimeUnit.MILLISECONDS;
     
     /**
    @@ -38,6 +38,7 @@ public class CloudWatchMetricsPublisher {
         private static final int BATCH_SIZE = 20;
         private static final int PUT_TIMEOUT_MILLIS = 5000;
         private static final AWSExceptionManager CW_EXCEPTION_MANAGER = new AWSExceptionManager();
    +
         static {
             CW_EXCEPTION_MANAGER.add(CloudWatchException.class, t -> t);
         }
    @@ -71,9 +72,11 @@ public class CloudWatchMetricsPublisher {
                 try {
                     PutMetricDataRequest.Builder finalRequest = request;
                     // This needs to be blocking. Making it asynchronous leads to increased throttling.
    -                blockingExecute(cloudWatchAsyncClient.putMetricData(finalRequest.build()), PUT_TIMEOUT_MILLIS,
    +                blockingExecute(
    +                        cloudWatchAsyncClient.putMetricData(finalRequest.build()),
    +                        PUT_TIMEOUT_MILLIS,
                             CW_EXCEPTION_MANAGER);
    -            } catch(CloudWatchException | TimeoutException e) {
    +            } catch (CloudWatchException | TimeoutException e) {
                     log.warn("Could not publish {} datums to CloudWatch", endIndex - startIndex, e);
                 } catch (Exception e) {
                     log.error("Unknown exception while publishing {} datums to CloudWatch", endIndex - startIndex, e);
    @@ -81,8 +84,9 @@ public class CloudWatchMetricsPublisher {
             }
         }
     
    -    private static <T> void blockingExecute(CompletableFuture<T> future, long timeOutMillis,
    -            AWSExceptionManager exceptionManager) throws TimeoutException {
    +    private static <T> void blockingExecute(
    +            CompletableFuture<T> future, long timeOutMillis, AWSExceptionManager exceptionManager)
    +            throws TimeoutException {
             try {
                 future.get(timeOutMillis, MILLISECONDS);
             } catch (ExecutionException e) {
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java
    index c2d38526..0346d9a1 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java
    @@ -18,7 +18,6 @@ import java.util.List;
     import java.util.Set;
     import java.util.stream.Collectors;
     
    -
     /**
      * Metrics scope for CloudWatch metrics.
      */
    @@ -32,8 +31,8 @@ public class CloudWatchMetricsScope extends FilteringMetricsScope implements Met
          * @param metricsLevel Metrics level to enable. All data with level below this will be dropped.
          * @param metricsEnabledDimensions Enabled dimensions for CloudWatch metrics.
          */
    -    public CloudWatchMetricsScope(CloudWatchPublisherRunnable publisher,
    -                                  MetricsLevel metricsLevel, Set<String> metricsEnabledDimensions) {
    +    public CloudWatchMetricsScope(
    +            CloudWatchPublisherRunnable publisher, MetricsLevel metricsLevel, Set<String> metricsEnabledDimensions) {
             super(metricsLevel, metricsEnabledDimensions);
             this.publisher = publisher;
         }
    @@ -49,11 +48,11 @@ public class CloudWatchMetricsScope extends FilteringMetricsScope implements Met
             super.end();
     
             final List<MetricDatumWithKey<CloudWatchMetricKey>> dataWithKeys = data.values().stream()
    -                .map(metricDatum -> metricDatum.toBuilder().dimensions(getDimensions()).build())
    +                .map(metricDatum ->
    +                        metricDatum.toBuilder().dimensions(getDimensions()).build())
                     .map(metricDatum -> new MetricDatumWithKey<>(new CloudWatchMetricKey(metricDatum), metricDatum))
                     .collect(Collectors.toList());
     
             publisher.enqueue(dataWithKeys);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java
    index aeb40b45..5bb41c73 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java
    @@ -43,27 +43,26 @@ public class CloudWatchPublisherRunnable implements Runnable {
     
         /**
          * Constructor.
    -     * 
    +     *
          * @param metricsPublisher publishes metrics
          * @param bufferTimeMillis time between publishing metrics
          * @param maxQueueSize max size of metrics to publish
          * @param batchSize size of batch that can be published at a time
          */
    -
    -    public CloudWatchPublisherRunnable(CloudWatchMetricsPublisher metricsPublisher,
    -                                       long bufferTimeMillis,
    -                                       int maxQueueSize,
    -                                       int batchSize) {
    +    public CloudWatchPublisherRunnable(
    +            CloudWatchMetricsPublisher metricsPublisher, long bufferTimeMillis, int maxQueueSize, int batchSize) {
             this(metricsPublisher, bufferTimeMillis, maxQueueSize, batchSize, 0);
         }
     
    -    public CloudWatchPublisherRunnable(CloudWatchMetricsPublisher metricsPublisher,
    -                                       long bufferTimeMillis,
    -                                       int maxQueueSize,
    -                                       int batchSize,
    -                                       int maxJitter) {
    +    public CloudWatchPublisherRunnable(
    +            CloudWatchMetricsPublisher metricsPublisher,
    +            long bufferTimeMillis,
    +            int maxQueueSize,
    +            int batchSize,
    +            int maxJitter) {
             if (log.isDebugEnabled()) {
    -            log.debug("Constructing CloudWatchPublisherRunnable with maxBufferTimeMillis {} maxQueueSize {} batchSize {} maxJitter {}",
    +            log.debug(
    +                    "Constructing CloudWatchPublisherRunnable with maxBufferTimeMillis {} maxQueueSize {} batchSize {} maxJitter {}",
                         bufferTimeMillis,
                         maxQueueSize,
                         batchSize,
    @@ -98,7 +97,7 @@ public class CloudWatchPublisherRunnable implements Runnable {
             synchronized (queue) {
                 /*
                  * We should send if:
    -             * 
    +             *
                  * it's been maxBufferTimeMillis since our last send
                  * or if the queue contains > batchSize elements
                  * or if we're shutting down
    @@ -121,8 +120,7 @@ public class CloudWatchPublisherRunnable implements Runnable {
                 } else {
                     long waitTime = bufferTimeMillis - timeSinceFlush;
                     if (log.isDebugEnabled()) {
    -                    log.debug("Waiting up to {} ms for {} more datums to appear.", waitTime, flushSize
    -                            - queue.size());
    +                    log.debug("Waiting up to {} ms for {} more datums to appear.", waitTime, flushSize - queue.size());
                     }
     
                     try {
    @@ -169,7 +167,7 @@ public class CloudWatchPublisherRunnable implements Runnable {
     
         /**
          * Enqueues metric data for publication.
    -     * 
    +     *
          * @param data collection of MetricDatum to enqueue
          */
         public void enqueue(Collection<MetricDatumWithKey<KeyType>> data) {
    @@ -197,5 +195,4 @@ public class CloudWatchPublisherRunnable implements Runnable {
                 queue.notify();
             }
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java
    index 9b6390ad..708be460 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java
    @@ -14,25 +14,23 @@
      */
     package software.amazon.kinesis.metrics;
     
    -import software.amazon.awssdk.services.cloudwatch.model.Dimension;
    -
     import java.util.HashSet;
     import java.util.Set;
     
    +import software.amazon.awssdk.services.cloudwatch.model.Dimension;
     
     /**
      * DimensionTrackingMetricsScope is where we provide functionality for dimensions.
      * Dimensions allow the user to be able view their metrics based off of the parameters they specify.
    - * 
    + *
      * The following examples show how to add dimensions if they would like to view their all metrics
      * pertaining to a particular stream or for a specific date.
    - * 
    + *
      * myScope.addDimension("StreamName", "myStreamName");
      * myScope.addDimension("Date", "Dec012013");
    - * 
    - * 
    + *
    + *
      */
    -
     public abstract class DimensionTrackingMetricsScope implements MetricsScope {
     
         private Set<Dimension> dimensions = new HashSet<>();
    @@ -45,9 +43,7 @@ public abstract class DimensionTrackingMetricsScope implements MetricsScope {
         /**
          * @return a set of dimensions for an IMetricsScope
          */
    -
         protected Set<Dimension> getDimensions() {
             return dimensions;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java
    index 96849850..7a118af9 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java
    @@ -54,10 +54,10 @@ public class FilteringMetricsScope extends AccumulateByNameMetricsScope {
          * @param metricsEnabledDimensions Enabled dimensions.
          */
         public FilteringMetricsScope(MetricsLevel metricsLevel, Set<String> metricsEnabledDimensions) {
    -          this.metricsLevel = metricsLevel;
    -          this.metricsEnabledDimensions = metricsEnabledDimensions;
    -          this.metricsEnabledDimensionsAll = (metricsEnabledDimensions != null &&
    -                  metricsEnabledDimensions.contains(METRICS_DIMENSIONS_ALL));
    +        this.metricsLevel = metricsLevel;
    +        this.metricsEnabledDimensions = metricsEnabledDimensions;
    +        this.metricsEnabledDimensionsAll =
    +                (metricsEnabledDimensions != null && metricsEnabledDimensions.contains(METRICS_DIMENSIONS_ALL));
         }
     
         /**
    @@ -95,8 +95,8 @@ public class FilteringMetricsScope extends AccumulateByNameMetricsScope {
          */
         @Override
         public void addDimension(String name, String value) {
    -        if (!metricsEnabledDimensionsAll &&
    -                (metricsEnabledDimensions == null || !metricsEnabledDimensions.contains(name))) {
    +        if (!metricsEnabledDimensionsAll
    +                && (metricsEnabledDimensions == null || !metricsEnabledDimensions.contains(name))) {
                 // Drop dimension.
                 return;
             }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java
    index d05b61ad..795b1f25 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java
    @@ -14,7 +14,6 @@
      */
     package software.amazon.kinesis.metrics;
     
    -
     import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     
     public abstract class InterceptingMetricsFactory implements MetricsFactory {
    @@ -40,7 +39,8 @@ public abstract class InterceptingMetricsFactory implements MetricsFactory {
             scope.addData(name, value, unit);
         }
     
    -    protected void interceptAddData(String name, double value, StandardUnit unit, MetricsLevel level, MetricsScope scope) {
    +    protected void interceptAddData(
    +            String name, double value, StandardUnit unit, MetricsLevel level, MetricsScope scope) {
             scope.addData(name, value, unit, level);
         }
     
    @@ -79,7 +79,5 @@ public abstract class InterceptingMetricsFactory implements MetricsFactory {
             public void end() {
                 interceptEnd(other);
             }
    -
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java
    index 8b879be0..5c0f3ca9 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java
    @@ -23,5 +23,4 @@ public class LogMetricsFactory implements MetricsFactory {
         public LogMetricsScope createMetrics() {
             return new LogMetricsScope();
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java
    index 16b86f3d..8879c574 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java
    @@ -14,8 +14,6 @@
      */
     package software.amazon.kinesis.metrics;
     
    -
    -
     import lombok.extern.slf4j.Slf4j;
     import software.amazon.awssdk.services.cloudwatch.model.Dimension;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
    @@ -41,7 +39,8 @@ public class LogMetricsScope extends AccumulateByNameMetricsScope {
     
             for (MetricDatum datum : data.values()) {
                 StatisticSet statistics = datum.statisticValues();
    -            output.append(String.format("Name=%25s\tMin=%.2f\tMax=%.2f\tCount=%.2f\tSum=%.2f\tAvg=%.2f\tUnit=%s\n",
    +            output.append(String.format(
    +                    "Name=%25s\tMin=%.2f\tMax=%.2f\tCount=%.2f\tSum=%.2f\tAvg=%.2f\tUnit=%s\n",
                         datum.metricName(),
                         statistics.minimum(),
                         statistics.maximum(),
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java
    index 603558d0..b9b89e1b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java
    @@ -24,13 +24,12 @@ import java.util.concurrent.LinkedBlockingQueue;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     import software.amazon.awssdk.services.cloudwatch.model.StatisticSet;
     
    -
     /**
      * Helper class for accumulating MetricDatums with the same name and dimensions.
    - * 
    + *
      * @param <KeyType> can be a class or object defined by the user that stores information about a MetricDatum needed
      *        by the user.
    - * 
    + *
      *        The following is a example of what a KeyType class might look like:
      *        class SampleKeyType {
      *              private long timeKeyCreated;
    @@ -75,7 +74,7 @@ public class MetricAccumulatingQueue<KeyType> {
         /**
          * We use a queue and a map in this method. The reason for this is because, the queue will keep our metrics in
          * FIFO order and the map will provide us with constant time lookup to get the appropriate MetricDatum.
    -     * 
    +     *
          * @param key metric key to be inserted into queue
          * @param datum metric to be inserted into queue
          * @return a boolean depending on whether the datum was inserted into the queue
    @@ -106,10 +105,12 @@ public class MetricAccumulatingQueue {
             StatisticSet oldStats = oldDatum.statisticValues();
             StatisticSet newStats = newDatum.statisticValues();
     
    -        StatisticSet statisticSet = oldStats.toBuilder().sum(oldStats.sum() + newStats.sum())
    +        StatisticSet statisticSet = oldStats.toBuilder()
    +                .sum(oldStats.sum() + newStats.sum())
                     .minimum(Math.min(oldStats.minimum(), newStats.minimum()))
                     .maximum(Math.max(oldStats.maximum(), newStats.maximum()))
    -                .sampleCount(oldStats.sampleCount() + newStats.sampleCount()).build();
    +                .sampleCount(oldStats.sampleCount() + newStats.sampleCount())
    +                .build();
     
             MetricDatum datum = oldDatum.toBuilder().statisticValues(statisticSet).build();
             metricDatumWithKey.datum(datum);
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java
    index 5234ffe4..3e542496 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java
    @@ -14,29 +14,27 @@
      */
     package software.amazon.kinesis.metrics;
     
    +import java.util.Objects;
    +
     import lombok.AllArgsConstructor;
    -import lombok.Data;
     import lombok.Setter;
     import lombok.experimental.Accessors;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     
    -import java.util.Objects;
    -
     /**
      * This class is used to store a MetricDatum as well as KeyType which stores specific information about
      * that particular MetricDatum.
    - * 
    + *
      * @param <KeyType> is a class that stores information about a MetricDatum. This is useful
      *        to compare MetricDatums, aggregate similar MetricDatums or store information about a datum
      *        that may be relevant to the user (i.e. MetricName, CustomerId, TimeStamp, etc).
    - * 
    + *
      *        Example:
    - * 
    + *
      *        Let SampleMetricKey be a KeyType that takes in the time in which the datum was created.
    - * 
    + *
      *        MetricDatumWithKey<SampleMetricKey> sampleDatumWithKey = new MetricDatumWithKey<>(new
      *        SampleMetricKey(System.currentTimeMillis()), datum)
    - *
      */
     @AllArgsConstructor
     @Setter
    @@ -59,14 +57,16 @@ public class MetricDatumWithKey<KeyType> {
     
         @Override
         public boolean equals(Object obj) {
    -        if (this == obj)
    +        if (this == obj) {
                 return true;
    -        if (obj == null)
    +        }
    +        if (obj == null) {
                 return false;
    -        if (getClass() != obj.getClass())
    +        }
    +        if (getClass() != obj.getClass()) {
                 return false;
    +        }
         MetricDatumWithKey<?> other = (MetricDatumWithKey<?>) obj;
             return Objects.equals(other.key, key) && Objects.equals(other.datum, datum);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java
    index 224562a4..7cc679a4 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java
    @@ -30,7 +30,7 @@ public class MetricsCollectingTaskDecorator implements ConsumerTask {
     
         /**
          * Constructor.
    -     * 
    +     *
          * @param other
          *            task to report metrics on
          * @param factory
    @@ -46,14 +46,15 @@ public class MetricsCollectingTaskDecorator implements ConsumerTask {
          */
         @Override
         public TaskResult call() {
    -        MetricsScope scope = MetricsUtil.createMetricsWithOperation(factory, other.getClass().getSimpleName());
    +        MetricsScope scope =
    +                MetricsUtil.createMetricsWithOperation(factory, other.getClass().getSimpleName());
             TaskResult result = null;
             final long startTimeMillis = System.currentTimeMillis();
             try {
                 result = other.call();
             } finally {
    -            MetricsUtil.addSuccessAndLatency(scope, result != null && result.getException() == null, startTimeMillis,
    -                    MetricsLevel.SUMMARY);
    +            MetricsUtil.addSuccessAndLatency(
    +                    scope, result != null && result.getException() == null, startTimeMillis, MetricsLevel.SUMMARY);
                 MetricsUtil.endScope(scope);
             }
             return result;
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java
    index 2aa2edc4..b97bfb10 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java
    @@ -15,10 +15,10 @@
     
     package software.amazon.kinesis.metrics;
     
    +import java.util.HashSet;
     import java.util.Set;
     
     import com.google.common.collect.ImmutableSet;
    -
     import lombok.Data;
     import lombok.experimental.Accessors;
     import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
    @@ -32,14 +32,16 @@ public class MetricsConfig {
         /**
          * Metrics dimensions that always will be enabled regardless of the config provided by user.
          */
    -    public static final Set<String> METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet
    -            .of(MetricsUtil.OPERATION_DIMENSION_NAME);
    +    public static final Set<String> METRICS_ALWAYS_ENABLED_DIMENSIONS =
    +            ImmutableSet.of(MetricsUtil.OPERATION_DIMENSION_NAME);
     
         /**
          * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled.
          */
    -    public static final Set<String> METRICS_ALWAYS_ENABLED_DIMENSIONS_WITH_SHARD_ID = ImmutableSet.<String>builder()
    -            .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsUtil.SHARD_ID_DIMENSION_NAME).build();
    +    public static final Set<String> METRICS_ALWAYS_ENABLED_DIMENSIONS_WITH_SHARD_ID = ImmutableSet.<String>builder()
    +            .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS)
    +            .add(MetricsUtil.SHARD_ID_DIMENSION_NAME)
    +            .build();
     
         /**
          * Metrics dimensions that signify all possible dimensions.
    @@ -94,7 +96,7 @@ public class MetricsConfig {
          * Default value: {@link MetricsConfig#METRICS_DIMENSIONS_ALL}
          * </p>
          */
    -    private Set<String> metricsEnabledDimensions = METRICS_DIMENSIONS_ALL;
    +    private HashSet<String> metricsEnabledDimensions = new HashSet<>(METRICS_DIMENSIONS_ALL);
     
         /**
          * Buffer size for MetricDatums before publishing.
    @@ -109,8 +111,14 @@ public class MetricsConfig {
     
         public MetricsFactory metricsFactory() {
             if (metricsFactory == null) {
    -            metricsFactory = new CloudWatchMetricsFactory(cloudWatchClient(), namespace(), metricsBufferTimeMillis(),
    -                    metricsMaxQueueSize(), metricsLevel(), metricsEnabledDimensions(), publisherFlushBuffer());
    +            metricsFactory = new CloudWatchMetricsFactory(
    +                    cloudWatchClient(),
    +                    namespace(),
    +                    metricsBufferTimeMillis(),
    +                    metricsMaxQueueSize(),
    +                    metricsLevel(),
    +                    metricsEnabledDimensions(),
    +                    publisherFlushBuffer());
             }
             return metricsFactory;
         }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java
    index 2efabcd1..ed18d45c 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java
    @@ -30,7 +30,7 @@ public interface MetricsScope {
         /**
          * Adds a data point to this IMetricsScope. Multiple calls against the same IMetricsScope with the same name
          * parameter will result in accumulation.
    -     * 
    +     *
          * @param name data point name
          * @param value data point value
          * @param unit unit of data point
    @@ -40,7 +40,7 @@ public interface MetricsScope {
         /**
          * Adds a data point to this IMetricsScope if given metrics level is enabled. Multiple calls against the same
          * IMetricsScope with the same name parameter will result in accumulation.
    -     * 
    +     *
          * @param name data point name
          * @param value data point value
          * @param unit unit of data point
    @@ -50,7 +50,7 @@ public interface MetricsScope {
         /**
          * Adds a dimension that applies to all metrics in this IMetricsScope.
    -     * 
    +     *
          * @param name dimension name
          * @param value dimension value
          */
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java
    index 20c7c244..8098109b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java
    @@ -15,9 +15,8 @@
     package software.amazon.kinesis.metrics;
     
    -import org.apache.commons.lang3.StringUtils;
    -
     import lombok.NonNull;
    +import org.apache.commons.lang3.StringUtils;
     import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     import software.amazon.kinesis.common.StreamIdentifier;
    @@ -36,8 +35,8 @@ public class MetricsUtil {
             return createMetricScope(metricsFactory, null);
         }
     
    -    public static MetricsScope createMetricsWithOperation(@NonNull final MetricsFactory metricsFactory,
    -            @NonNull final String operation) {
    +    public static MetricsScope createMetricsWithOperation(
    +            @NonNull final MetricsFactory metricsFactory, @NonNull final String operation) {
             return createMetricScope(metricsFactory, operation);
         }
    @@ -58,44 +57,60 @@ public class MetricsUtil {
                     .ifPresent(acc -> addOperation(metricsScope, STREAM_IDENTIFIER, streamId.serialize()));
         }
     
    -    public static void addWorkerIdentifier(@NonNull final MetricsScope metricsScope,
    -            @NonNull final String workerIdentifier) {
    +    public static void addWorkerIdentifier(
    +            @NonNull final MetricsScope metricsScope, @NonNull final String workerIdentifier) {
             addOperation(metricsScope, WORKER_IDENTIFIER_DIMENSION, workerIdentifier);
         }
     
    -    public static void addOperation(@NonNull final MetricsScope metricsScope, @NonNull final String dimension,
    -            @NonNull final String value) {
    +    public static void addOperation(
    +            @NonNull final MetricsScope metricsScope, @NonNull final String dimension, @NonNull final String value) {
             metricsScope.addDimension(dimension, value);
         }
     
    -    public static void addSuccessAndLatency(@NonNull final MetricsScope metricsScope, final boolean success,
    -            final long startTime, @NonNull final MetricsLevel metricsLevel) {
    +    public static void addSuccessAndLatency(
    +            @NonNull final MetricsScope metricsScope,
    +            final boolean success,
    +            final long startTime,
    +            @NonNull final MetricsLevel metricsLevel) {
             addSuccessAndLatency(metricsScope, null, success, startTime, metricsLevel);
         }
     
    -    public static void addSuccessAndLatency(@NonNull final MetricsScope metricsScope, final String dimension,
    -            final boolean success, final long startTime, @NonNull final MetricsLevel metricsLevel) {
    +    public static void addSuccessAndLatency(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final boolean success,
    +            final long startTime,
    +            @NonNull final MetricsLevel metricsLevel) {
             addSuccess(metricsScope, dimension, success, metricsLevel);
             addLatency(metricsScope, dimension, startTime, metricsLevel);
         }
     
    -    public static void addLatency(@NonNull final MetricsScope metricsScope, final String dimension,
    -            final long startTime, @NonNull final MetricsLevel metricsLevel) {
    -        final String metricName = StringUtils.isEmpty(dimension) ? TIME_METRIC
    -                : String.format("%s.%s", dimension, TIME_METRIC);
    -        metricsScope.addData(metricName, System.currentTimeMillis() - startTime, StandardUnit.MILLISECONDS,
    -                metricsLevel);
    +    public static void addLatency(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final long startTime,
    +            @NonNull final MetricsLevel metricsLevel) {
    +        final String metricName =
    +                StringUtils.isEmpty(dimension) ? TIME_METRIC : String.format("%s.%s", dimension, TIME_METRIC);
    +        metricsScope.addData(
    +                metricName, System.currentTimeMillis() - startTime, StandardUnit.MILLISECONDS, metricsLevel);
     }
     
    -    public static void addSuccess(@NonNull final MetricsScope metricsScope, final String dimension,
    -            final boolean success, @NonNull final MetricsLevel metricsLevel) {
    -        final String metricName = StringUtils.isEmpty(dimension) ? SUCCESS_METRIC
    -                : String.format("%s.%s", dimension, SUCCESS_METRIC);
    +    public static void addSuccess(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final boolean success,
    +            @NonNull final MetricsLevel metricsLevel) {
    +        final String metricName =
    +                StringUtils.isEmpty(dimension) ? SUCCESS_METRIC : String.format("%s.%s", dimension, SUCCESS_METRIC);
             metricsScope.addData(metricName, success ? 1 : 0, StandardUnit.COUNT, metricsLevel);
         }
     
    -    public static void addCount(@NonNull final MetricsScope metricsScope, final String dimension,
    -            final long count, @NonNull final MetricsLevel metricsLevel) {
    +    public static void addCount(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final long count,
    +            @NonNull final MetricsLevel metricsLevel) {
             metricsScope.addData(dimension, count, StandardUnit.COUNT, metricsLevel);
         }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java
    index 793b90d4..cb4e4a12 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java
    @@ -22,5 +22,4 @@ public class NullMetricsFactory implements MetricsFactory {
         public MetricsScope createMetrics() {
             return SCOPE;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java
    index a872ab56..e99f4d9c 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java
    @@ -19,22 +19,14 @@ import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     public class NullMetricsScope implements MetricsScope {
     
         @Override
    -    public void addData(String name, double value, StandardUnit unit) {
    -
    -    }
    +    public void addData(String name, double value, StandardUnit unit) {}
     
         @Override
    -    public void addData(String name, double value, StandardUnit unit, MetricsLevel level) {
    -
    -    }
    +    public void addData(String name, double value, StandardUnit unit, MetricsLevel level) {}
     
         @Override
    -    public void addDimension(String name, String value) {
    -
    -    }
    +    public void addDimension(String name, String value) {}
     
         @Override
    -    public void end() {
    -
    -    }
    +    public void end() {}
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java index 7a79e510..94ccdb96 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java @@ -14,7 +14,6 @@ */ package software.amazon.kinesis.metrics; - import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; /** @@ -65,5 +64,4 @@ public class ThreadSafeMetricsDelegatingScope implements MetricsScope { public synchronized void end() { delegate.end(); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java index 2ffadc06..ec53e7cc 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java @@ -14,8 +14,8 @@ */ package software.amazon.kinesis.processor; -import software.amazon.kinesis.exceptions.KinesisClientLibException; import software.amazon.kinesis.checkpoint.Checkpoint; +import software.amazon.kinesis.exceptions.KinesisClientLibException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; /** @@ -24,18 +24,18 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public interface Checkpointer { /** - * Record a checkpoint for a shard (e.g. sequence and subsequence numbers of last record processed + * Record a checkpoint for a shard (e.g. sequence and subsequence numbers of last record processed * by application). Upon failover, record processing is resumed from this point. - * + * * @param leaseKey Checkpoint is specified for this shard. * @param checkpointValue Value of the checkpoint (e.g. Kinesis sequence number and subsequence number) * @param concurrencyToken Used with conditional writes to prevent stale updates - * (e.g. if there was a fail over to a different record processor, we don't want to + * (e.g. if there was a fail over to a different record processor, we don't want to * overwrite it's checkpoint) * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint */ void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken) - throws KinesisClientLibException; + throws KinesisClientLibException; /** * Get the current checkpoint stored for the specified shard. Useful for checking that the parent shard @@ -58,7 +58,6 @@ public interface Checkpointer { */ Checkpoint getCheckpointObject(String leaseKey) throws KinesisClientLibException; - /** * Record intent to checkpoint for a shard. Upon failover, the pendingCheckpointValue will be passed to the new * ShardRecordProcessor's initialize() method. @@ -71,7 +70,7 @@ public interface Checkpointer { * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint */ void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) - throws KinesisClientLibException; + throws KinesisClientLibException; /** * Record intent to checkpoint for a shard. 
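
A sketch of driving the `Checkpointer` interface directly, with illustrative lease key and concurrency token values (applications normally checkpoint through `RecordProcessorCheckpointer` instead):

```java
import software.amazon.kinesis.checkpoint.Checkpoint;
import software.amazon.kinesis.exceptions.KinesisClientLibException;
import software.amazon.kinesis.processor.Checkpointer;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

class CheckpointerExample {
    static void saveAndRead(Checkpointer checkpointer) throws KinesisClientLibException {
        ExtendedSequenceNumber position =
                new ExtendedSequenceNumber("49590338271490256608559692538361571095921575989136588898");
        // Conditional write: a stale concurrency token will not overwrite a newer owner's checkpoint.
        checkpointer.setCheckpoint("shardId-000000000000", position, "concurrency-token-1");
        Checkpoint saved = checkpointer.getCheckpointObject("shardId-000000000000");
    }
}
```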
Upon failover, the pendingCheckpoint and pendingCheckpointState will be @@ -86,11 +85,14 @@ public interface Checkpointer { * * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint */ - void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) + void prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + String concurrencyToken, + byte[] pendingCheckpointState) throws KinesisClientLibException; void operation(String operation); String operation(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java index 232c428d..608aab50 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java @@ -15,11 +15,11 @@ package software.amazon.kinesis.processor; -import software.amazon.kinesis.common.StreamIdentifier; - import java.time.Duration; import java.util.List; +import software.amazon.kinesis.common.StreamIdentifier; + /** * Strategy for cleaning up the leases for former streams. */ @@ -101,9 +101,4 @@ public interface FormerStreamsLeasesDeletionStrategy { return StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION; } } - } - - - - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java index ead38333..cfff520d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java @@ -24,5 +24,4 @@ public interface MultiStreamTracker extends StreamTracker { default boolean isMultiStream() { return true; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java index a87f536a..ef4a40a1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java @@ -50,6 +50,5 @@ public interface PreparedCheckpointer { */ void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; - -} \ No newline at end of file + IllegalArgumentException; +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java index 04ea6614..d5366f3a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java @@ -15,9 +15,9 @@ package software.amazon.kinesis.processor; - import lombok.Data; - import lombok.NonNull; - import lombok.experimental.Accessors; +import lombok.Data; +import lombok.NonNull; +import lombok.experimental.Accessors; /** * Used by the KCL to configure the processor for 
processing the records.
@@ -37,5 +37,4 @@ public class ProcessorConfig {
          * <p>
          *     Default value: false
          * </p>
    */ private boolean callProcessRecordsEvenForEmptyRecordList = false; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java index 34b2930c..2a868951 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java @@ -34,7 +34,7 @@ public interface RecordProcessorCheckpointer { * In steady state, applications should checkpoint periodically (e.g. once every 5 minutes). * Calling this API too frequently can slow down the application (because it puts pressure on the underlying * checkpoint storage layer). - * + * * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. * @throws ShutdownException The record processor instance has been shutdown. Another instance may have @@ -46,13 +46,13 @@ public interface RecordProcessorCheckpointer { * backoff and retry. */ void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; /** * This method will checkpoint the progress at the provided record. This method is analogous to * {@link #checkpoint()} but provides the ability to specify the record at which to * checkpoint. - * + * * @param record A record at which to checkpoint in this shard. Upon failover, * the Kinesis Client Library will start fetching records after this record's sequence number. * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. @@ -66,13 +66,13 @@ public interface RecordProcessorCheckpointer { * backoff and retry. */ void checkpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; /** * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to * {@link #checkpoint()} but provides the ability to specify the sequence number at which to * checkpoint. - * + * * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, * the Kinesis Client Library will start fetching records after this sequence number. * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. @@ -90,14 +90,14 @@ public interface RecordProcessorCheckpointer { * 2.) It is not a valid sequence number for a record in this shard. */ void checkpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException; /** * This method will checkpoint the progress at the provided sequenceNumber and subSequenceNumber, the latter for - * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} + * aggregated records produced with the Producer Library. 
This method is analogous to {@link #checkpoint()} * but provides the ability to specify the sequence and subsequence numbers at which to checkpoint. - * + * * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, the Kinesis * Client Library will start fetching records after the given sequence and subsequence numbers. * @param subSequenceNumber A subsequence number at which to checkpoint within this shard. Upon failover, the @@ -117,8 +117,8 @@ public interface RecordProcessorCheckpointer { * 2.) It is not a valid sequence number for a record in this shard. */ void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException; /** * This method will record a pending checkpoint at the last data record that was delivered to the record processor. @@ -236,7 +236,7 @@ public interface RecordProcessorCheckpointer { * {@link #prepareCheckpoint()} but provides the ability to specify the sequence number at which to checkpoint. * * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. - + * * @return an PreparedCheckpointer object that can be called later to persist the checkpoint. * * @throws ThrottlingException Can't store pending checkpoint. Can be caused by checkpointing too frequently. @@ -255,7 +255,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; /** * This method will record a pending checkpoint at the provided sequenceNumber. 
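
A sketch of the two-phase flow these methods enable: record intent first, then commit once the application's own side effects are durable. Exception handling is collapsed to `throws Exception` for brevity:

```java
import software.amazon.kinesis.processor.PreparedCheckpointer;
import software.amazon.kinesis.processor.RecordProcessorCheckpointer;

class TwoPhaseCheckpointExample {
    static void commit(RecordProcessorCheckpointer checkpointer, String sequenceNumber) throws Exception {
        PreparedCheckpointer prepared = checkpointer.prepareCheckpoint(sequenceNumber);
        // ... flush application state that must survive a failover ...
        prepared.checkpoint(); // persists the pending checkpoint recorded above
    }
}
```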
This method is analogous to @@ -284,7 +284,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; /** * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for @@ -312,7 +312,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; /** * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for @@ -343,7 +343,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; Checkpointer checkpointer(); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java index 9b8b6946..34cb3314 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java @@ -30,7 +30,7 @@ public interface ShardRecordProcessor { * Invoked by the Amazon Kinesis Client Library before data records are delivered to the ShardRecordProcessor instance * (via processRecords). * - * @param initializationInput Provides information related to initialization + * @param initializationInput Provides information related to initialization */ void initialize(InitializationInput initializationInput); @@ -48,7 +48,7 @@ public interface ShardRecordProcessor { /** * Called when the lease that tied to this record processor has been lost. Once the lease has been lost the record * processor can no longer checkpoint. - * + * * @param leaseLostInput * access to functions and data related to the loss of the lease. Currently this has no functionality. */ @@ -60,7 +60,7 @@ public interface ShardRecordProcessor { * * When this is called the record processor must call {@link RecordProcessorCheckpointer#checkpoint()}, * otherwise an exception will be thrown and the all child shards of this shard will not make progress. - * + * * @param shardEndedInput * provides access to a checkpointer method for completing processing of the shard. */ @@ -76,5 +76,4 @@ public interface ShardRecordProcessor { * completed. 
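
A minimal implementation sketch of the `ShardRecordProcessor` lifecycle documented above; a real processor would add retry and backoff around `checkpoint()` and proper error handling:

```java
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput;
import software.amazon.kinesis.processor.ShardRecordProcessor;

public class LoggingRecordProcessor implements ShardRecordProcessor {
    @Override
    public void initialize(InitializationInput initializationInput) {}

    @Override
    public void processRecords(ProcessRecordsInput processRecordsInput) {
        processRecordsInput.records().forEach(r -> System.out.println(r.sequenceNumber()));
    }

    @Override
    public void leaseLost(LeaseLostInput leaseLostInput) {
        // Lease is gone; checkpointing is no longer possible.
    }

    @Override
    public void shardEnded(ShardEndedInput shardEndedInput) {
        try {
            shardEndedInput.checkpointer().checkpoint(); // required so child shards can progress
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) {}
}
```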
*/ void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java index 0b3de8ab..3f6c03fc 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java @@ -16,16 +16,17 @@ package software.amazon.kinesis.processor; /** * Allows a record processor to indicate it's aware of requested shutdowns, and handle the request. + * @deprecated This class is not used, {@link ShardRecordProcessor} provide shutdownRequested + * notifications already. */ +@Deprecated public interface ShutdownNotificationAware { - /** * Called when the worker has been requested to shutdown, and gives the record processor a chance to checkpoint. * * The record processor will still have shutdown called. - * + * * @param checkpointer the checkpointer that can be used to save progress. */ void shutdownRequested(RecordProcessorCheckpointer checkpointer); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java index 703c4881..04075efa 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java @@ -21,6 +21,7 @@ import java.util.List; import lombok.EqualsAndHashCode; import lombok.NonNull; import lombok.ToString; +import software.amazon.awssdk.arns.Arn; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; @@ -48,16 +49,23 @@ public class SingleStreamTracker implements StreamTracker { this(StreamIdentifier.singleStreamInstance(streamName)); } + public SingleStreamTracker(Arn streamArn) { + this(StreamIdentifier.singleStreamInstance(streamArn)); + } + public SingleStreamTracker(StreamIdentifier streamIdentifier) { this(streamIdentifier, DEFAULT_POSITION_IN_STREAM); } public SingleStreamTracker( - StreamIdentifier streamIdentifier, - @NonNull InitialPositionInStreamExtended initialPosition) { + StreamIdentifier streamIdentifier, @NonNull InitialPositionInStreamExtended initialPosition) { this(streamIdentifier, new StreamConfig(streamIdentifier, initialPosition)); } + public SingleStreamTracker(String streamName, @NonNull InitialPositionInStreamExtended initialPosition) { + this(StreamIdentifier.singleStreamInstance(streamName), initialPosition); + } + public SingleStreamTracker(@NonNull StreamIdentifier streamIdentifier, @NonNull StreamConfig streamConfig) { this.streamIdentifier = streamIdentifier; this.streamConfigs = Collections.singletonList(streamConfig); @@ -77,5 +85,4 @@ public class SingleStreamTracker implements StreamTracker { public boolean isMultiStream() { return false; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java index befa3709..48b5c88d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java +++ 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java @@ -15,13 +15,13 @@ package software.amazon.kinesis.processor; +import java.util.List; + import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; -import java.util.List; - /** * Interface for stream trackers. * KCL will periodically probe this interface to learn about the new and old streams. @@ -81,5 +81,4 @@ public interface StreamTracker { *
         * </p>
    */ boolean isMultiStream(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java index 8081b946..c5368ad4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java @@ -32,14 +32,15 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi; */ @KinesisClientInternalApi public class AWSExceptionManager { - private final Map, Function> map = new HashMap<>(); + private final Map, Function> map = + new HashMap<>(); @Setter @Accessors(fluent = true) private Function defaultFunction = RuntimeException::new; - public void add(@NonNull final Class clazz, - @NonNull final Function function) { + public void add( + @NonNull final Class clazz, @NonNull final Function function) { map.put(clazz, function); } @@ -66,5 +67,4 @@ public class AWSExceptionManager { (Function) handleFor(t); return f.apply(t); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java index 533f47ab..fc28274a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java @@ -24,12 +24,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import org.apache.commons.lang3.StringUtils; - import com.google.protobuf.InvalidProtocolBufferException; - import lombok.NonNull; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.kinesis.retrieval.kpl.Messages; /** @@ -37,7 +35,7 @@ import software.amazon.kinesis.retrieval.kpl.Messages; */ @Slf4j public class AggregatorUtil { - public static final byte[] AGGREGATED_RECORD_MAGIC = new byte[]{-13, -119, -102, -62}; + public static final byte[] AGGREGATED_RECORD_MAGIC = new byte[] {-13, -119, -102, -62}; private static final int DIGEST_SIZE = 16; private static final BigInteger STARTING_HASH_KEY = new BigInteger("0"); // largest hash key = 2^128-1 @@ -58,7 +56,7 @@ public class AggregatorUtil { /** * Deaggregate any KPL records found. This method converts the starting and ending hash keys to {@link BigInteger}s * before passing them on to {@link #deaggregate(List, BigInteger, BigInteger)} - * + * * @param records * the records to potentially deaggreate * @param startingHashKey @@ -67,8 +65,8 @@ public class AggregatorUtil { * the ending hash key of the shard * @return A list of records with any aggregate records deaggregated */ - public List deaggregate(List records, String startingHashKey, - String endingHashKey) { + public List deaggregate( + List records, String startingHashKey, String endingHashKey) { return deaggregate(records, new BigInteger(startingHashKey), new BigInteger(endingHashKey)); } @@ -91,9 +89,8 @@ public class AggregatorUtil { * the endingHashKey. 
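
A usage sketch for `AggregatorUtil#deaggregate` over a shard's full hash-key range; in practice the bounds would come from the shard's `hashKeyRange`, and `records` is assumed to hold the raw batch:

```java
import java.math.BigInteger;
import java.util.List;

import software.amazon.kinesis.retrieval.AggregatorUtil;
import software.amazon.kinesis.retrieval.KinesisClientRecord;

class DeaggregationExample {
    static List<KinesisClientRecord> deaggregate(List<KinesisClientRecord> records) {
        return new AggregatorUtil().deaggregate(
                records,
                new BigInteger("0"),
                new BigInteger("340282366920938463463374607431768211455")); // 2^128 - 1
    }
}
```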
*/ // CHECKSTYLE:OFF NPathComplexity - public List deaggregate(List records, - BigInteger startingHashKey, - BigInteger endingHashKey) { + public List deaggregate( + List records, BigInteger startingHashKey, BigInteger endingHashKey) { List result = new ArrayList<>(); byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length]; byte[] digest = new byte[DIGEST_SIZE]; @@ -130,7 +127,8 @@ public class AggregatorUtil { List pks = ar.getPartitionKeyTableList(); List ehks = ar.getExplicitHashKeyTableList(); long aat = r.approximateArrivalTimestamp() == null - ? -1 : r.approximateArrivalTimestamp().toEpochMilli(); + ? -1 + : r.approximateArrivalTimestamp().toEpochMilli(); try { int recordsInCurrRecord = 0; for (Messages.Record mr : ar.getRecordsList()) { @@ -157,7 +155,8 @@ public class AggregatorUtil { .partitionKey(partitionKey) .explicitHashKey(explicitHashKey) .build(); - result.add(convertRecordToKinesisClientRecord(record, true, subSeqNum++, explicitHashKey)); + result.add( + convertRecordToKinesisClientRecord(record, true, subSeqNum++, explicitHashKey)); } } catch (Exception e) { StringBuilder sb = new StringBuilder(); @@ -171,14 +170,25 @@ public class AggregatorUtil { sb.append(s).append("\n"); } for (Messages.Record mr : ar.getRecordsList()) { - sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ") - .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ") - .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ") - .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n"); + sb.append("Record: [hasEhk=") + .append(mr.hasExplicitHashKeyIndex()) + .append(", ") + .append("ehkIdx=") + .append(mr.getExplicitHashKeyIndex()) + .append(", ") + .append("pkIdx=") + .append(mr.getPartitionKeyIndex()) + .append(", ") + .append("dataLen=") + .append(mr.getData().toByteArray().length) + .append("]\n"); } - sb.append("Sequence number: ").append(r.sequenceNumber()).append("\n") + sb.append("Sequence number: ") + .append(r.sequenceNumber()) + .append("\n") .append("Raw data: ") - .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)).append("\n"); + .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)) + .append("\n"); log.error(sb.toString(), e); } } catch (InvalidProtocolBufferException e) { @@ -199,7 +209,8 @@ public class AggregatorUtil { return md5(data); } - protected BigInteger effectiveHashKey(String partitionKey, String explicitHashKey) throws UnsupportedEncodingException { + protected BigInteger effectiveHashKey(String partitionKey, String explicitHashKey) + throws UnsupportedEncodingException { if (explicitHashKey == null) { return new BigInteger(1, md5(partitionKey.getBytes("UTF-8"))); } @@ -215,10 +226,11 @@ public class AggregatorUtil { } } - public KinesisClientRecord convertRecordToKinesisClientRecord(@NonNull final KinesisClientRecord record, - final boolean aggregated, - final long subSequenceNumber, - final String explicitHashKey) { + public KinesisClientRecord convertRecordToKinesisClientRecord( + @NonNull final KinesisClientRecord record, + final boolean aggregated, + final long subSequenceNumber, + final String explicitHashKey) { return KinesisClientRecord.builder() .data(record.data()) .partitionKey(record.partitionKey()) diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java index b5c7b23e..d8395c9a 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java @@ -16,19 +16,20 @@ package software.amazon.kinesis.retrieval; import java.time.Duration; + import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.metrics.MetricsFactory; public interface DataFetcherProviderConfig { /** - * Gets stream identifier for dataFetcher. - */ + * Gets stream identifier for dataFetcher. + */ StreamIdentifier getStreamIdentifier(); /** - * Gets shard id. - */ + * Gets shard id. + */ String getShardId(); /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java index 5ed68765..82e9da36 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java @@ -22,7 +22,7 @@ import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; public interface DataFetcherResult { /** * The result of the request to Kinesis - * + * * @return The result of the request, this can be null if the request failed. */ GetRecordsResponse getResult(); @@ -30,14 +30,14 @@ public interface DataFetcherResult { /** * Accepts the result, and advances the shard iterator. A result from the data fetcher must be accepted before any * further progress can be made. - * + * * @return the result of the request, this can be null if the request failed. */ GetRecordsResponse accept(); /** * Indicates whether this result is at the end of the shard or not - * + * * @return true if the result is at the end of a shard, false otherwise */ boolean isShardEnd(); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java index 529016ee..ba0ec587 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java @@ -18,5 +18,6 @@ package software.amazon.kinesis.retrieval; * */ public enum DataFetchingStrategy { - DEFAULT, PREFETCH_CACHED; + DEFAULT, + PREFETCH_CACHED; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java index ba743e61..72ecd19a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java @@ -15,21 +15,23 @@ package software.amazon.kinesis.retrieval; +import java.util.List; + import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.awssdk.utils.CollectionUtils; -import java.util.List; - public class DataRetrievalUtil { public static boolean isValidResult(String shardEndIndicator, List childShards) { - // shardEndIndicator is nextShardIterator for GetRecordsResponse, and is continuationSequenceNumber for SubscribeToShardEvent + // shardEndIndicator is nextShardIterator for GetRecordsResponse, and is continuationSequenceNumber for + // SubscribeToShardEvent // There are 
two valid scenarios for the shardEndIndicator and childShards combination. // 1. ShardEnd scenario: shardEndIndicator should be null and childShards should be a non-empty list. - // 2. Non-ShardEnd scenario: shardEndIndicator should be non-null and childShards should be null or an empty list. + // 2. Non-ShardEnd scenario: shardEndIndicator should be non-null and childShards should be null or an empty + // list. // Otherwise, the retrieval result is invalid. - if (shardEndIndicator == null && CollectionUtils.isNullOrEmpty(childShards) || - shardEndIndicator != null && !CollectionUtils.isNullOrEmpty(childShards)) { + if (shardEndIndicator == null && CollectionUtils.isNullOrEmpty(childShards) + || shardEndIndicator != null && !CollectionUtils.isNullOrEmpty(childShards)) { return false; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java index 3ff8e620..ca0cfbe8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java @@ -15,6 +15,7 @@ package software.amazon.kinesis.retrieval; import java.util.Optional; + import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.retrieval.polling.DataFetcher; import software.amazon.kinesis.retrieval.polling.KinesisDataFetcher; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java index 158413f9..42942813 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java @@ -17,7 +17,7 @@ package software.amazon.kinesis.retrieval; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; /** - * This class uses the GetRecordsRetrievalStrategy class to retrieve the next set of records and update the cache. + * This class uses the GetRecordsRetrievalStrategy class to retrieve the next set of records and update the cache. 
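
The shardEndIndicator/childShards combinations checked by `DataRetrievalUtil#isValidResult` reduce to the truth table below; the iterator string is illustrative, and `childShards` is assumed non-empty:

```java
import java.util.Collections;
import java.util.List;

import software.amazon.awssdk.services.kinesis.model.ChildShard;
import software.amazon.kinesis.retrieval.DataRetrievalUtil;

class ValidResultExample {
    static void illustrate(List<ChildShard> childShards) { // assume childShards is non-empty
        DataRetrievalUtil.isValidResult("next-iterator", Collections.emptyList()); // true: mid-shard
        DataRetrievalUtil.isValidResult(null, childShards); // true: shard end with children
        DataRetrievalUtil.isValidResult(null, Collections.emptyList()); // false: shard end, no children
        DataRetrievalUtil.isValidResult("next-iterator", childShards); // false: both populated
    }
}
```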
*/ public interface GetRecordsRetriever { GetRecordsResponse getNextRecords(int maxRecords); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java index 9e9adf91..bead706d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java @@ -16,28 +16,43 @@ import software.amazon.kinesis.common.InitialPositionInStreamExtended; @KinesisClientInternalApi public class IteratorBuilder { - public static SubscribeToShardRequest.Builder request(SubscribeToShardRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return builder.startingPosition(request(StartingPosition.builder(), sequenceNumber, initialPosition).build()); - } - - public static SubscribeToShardRequest.Builder reconnectRequest(SubscribeToShardRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return builder.startingPosition( - reconnectRequest(StartingPosition.builder(), sequenceNumber, initialPosition).build()); - } - - public static StartingPosition.Builder request(StartingPosition.Builder builder, String sequenceNumber, + public static SubscribeToShardRequest.Builder request( + SubscribeToShardRequest.Builder builder, + String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return apply(builder, StartingPosition.Builder::type, StartingPosition.Builder::timestamp, - StartingPosition.Builder::sequenceNumber, initialPosition, sequenceNumber, + return builder.startingPosition(request(StartingPosition.builder(), sequenceNumber, initialPosition) + .build()); + } + + public static SubscribeToShardRequest.Builder reconnectRequest( + SubscribeToShardRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition) { + return builder.startingPosition(reconnectRequest(StartingPosition.builder(), sequenceNumber, initialPosition) + .build()); + } + + public static StartingPosition.Builder request( + StartingPosition.Builder builder, String sequenceNumber, InitialPositionInStreamExtended initialPosition) { + return apply( + builder, + StartingPosition.Builder::type, + StartingPosition.Builder::timestamp, + StartingPosition.Builder::sequenceNumber, + initialPosition, + sequenceNumber, ShardIteratorType.AT_SEQUENCE_NUMBER); } - public static StartingPosition.Builder reconnectRequest(StartingPosition.Builder builder, String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { - return apply(builder, StartingPosition.Builder::type, StartingPosition.Builder::timestamp, - StartingPosition.Builder::sequenceNumber, initialPosition, sequenceNumber, + public static StartingPosition.Builder reconnectRequest( + StartingPosition.Builder builder, String sequenceNumber, InitialPositionInStreamExtended initialPosition) { + return apply( + builder, + StartingPosition.Builder::type, + StartingPosition.Builder::timestamp, + StartingPosition.Builder::sequenceNumber, + initialPosition, + sequenceNumber, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } @@ -49,11 +64,11 @@ public class IteratorBuilder { * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. * @return An updated GetShardIteratorRequest.Builder. 
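
A sketch of how the library composes a `GetShardIteratorRequest` with this builder; `IteratorBuilder` is annotated `@KinesisClientInternalApi`, so this illustrates the mechanism rather than a supported application API, and the stream and shard names are illustrative:

```java
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.retrieval.IteratorBuilder;

class IteratorBuilderExample {
    static GetShardIteratorRequest resumeAt(String sequenceNumber) {
        return IteratorBuilder.request(
                        GetShardIteratorRequest.builder()
                                .streamName("my-stream") // illustrative
                                .shardId("shardId-000000000000"),
                        sequenceNumber,
                        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))
                .build();
    }
}
```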
*/ - public static GetShardIteratorRequest.Builder request(GetShardIteratorRequest.Builder builder, - String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { + public static GetShardIteratorRequest.Builder request( + GetShardIteratorRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition) { return getShardIteratorRequest(builder, sequenceNumber, initialPosition, ShardIteratorType.AT_SEQUENCE_NUMBER); - } /** @@ -64,22 +79,30 @@ public class IteratorBuilder { * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. * @return An updated GetShardIteratorRequest.Builder. */ - public static GetShardIteratorRequest.Builder reconnectRequest(GetShardIteratorRequest.Builder builder, - String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { - return getShardIteratorRequest(builder, sequenceNumber, initialPosition, ShardIteratorType.AFTER_SEQUENCE_NUMBER); + public static GetShardIteratorRequest.Builder reconnectRequest( + GetShardIteratorRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition) { + return getShardIteratorRequest( + builder, sequenceNumber, initialPosition, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } - private static GetShardIteratorRequest.Builder getShardIteratorRequest(GetShardIteratorRequest.Builder builder, - String sequenceNumber, - InitialPositionInStreamExtended initialPosition, - ShardIteratorType shardIteratorType) { - return apply(builder, GetShardIteratorRequest.Builder::shardIteratorType, GetShardIteratorRequest.Builder::timestamp, - GetShardIteratorRequest.Builder::startingSequenceNumber, initialPosition, sequenceNumber, + private static GetShardIteratorRequest.Builder getShardIteratorRequest( + GetShardIteratorRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition, + ShardIteratorType shardIteratorType) { + return apply( + builder, + GetShardIteratorRequest.Builder::shardIteratorType, + GetShardIteratorRequest.Builder::timestamp, + GetShardIteratorRequest.Builder::startingSequenceNumber, + initialPosition, + sequenceNumber, shardIteratorType); } - private final static Map SHARD_ITERATOR_MAPPING; + private static final Map SHARD_ITERATOR_MAPPING; static { Map map = new HashMap<>(); @@ -95,22 +118,25 @@ public class IteratorBuilder { R apply(R updated, T value); } - private static R apply(R initial, UpdatingFunction shardIterFunc, - UpdatingFunction dateFunc, UpdatingFunction sequenceFunction, - InitialPositionInStreamExtended initialPositionInStreamExtended, String sequenceNumber, + private static R apply( + R initial, + UpdatingFunction shardIterFunc, + UpdatingFunction dateFunc, + UpdatingFunction sequenceFunction, + InitialPositionInStreamExtended initialPositionInStreamExtended, + String sequenceNumber, ShardIteratorType defaultIteratorType) { - ShardIteratorType iteratorType = SHARD_ITERATOR_MAPPING.getOrDefault( - sequenceNumber, defaultIteratorType); + ShardIteratorType iteratorType = SHARD_ITERATOR_MAPPING.getOrDefault(sequenceNumber, defaultIteratorType); R result = shardIterFunc.apply(initial, iteratorType); switch (iteratorType) { - case AT_TIMESTAMP: - return dateFunc.apply(result, initialPositionInStreamExtended.getTimestamp().toInstant()); - case AT_SEQUENCE_NUMBER: - case AFTER_SEQUENCE_NUMBER: - return sequenceFunction.apply(result, sequenceNumber); - default: - return result; + case AT_TIMESTAMP: + return dateFunc.apply( + result, 
initialPositionInStreamExtended.getTimestamp().toInstant()); + case AT_SEQUENCE_NUMBER: + case AFTER_SEQUENCE_NUMBER: + return sequenceFunction.apply(result, sequenceNumber); + default: + return result; } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java index 8a3d4d13..5e8018f9 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java @@ -47,8 +47,12 @@ public class KinesisClientRecord { private final Schema schema; public static KinesisClientRecord fromRecord(Record record) { - return KinesisClientRecord.builder().sequenceNumber(record.sequenceNumber()) - .approximateArrivalTimestamp(record.approximateArrivalTimestamp()).data(record.data().asByteBuffer()) - .partitionKey(record.partitionKey()).encryptionType(record.encryptionType()).build(); + return KinesisClientRecord.builder() + .sequenceNumber(record.sequenceNumber()) + .approximateArrivalTimestamp(record.approximateArrivalTimestamp()) + .data(record.data().asByteBuffer()) + .partitionKey(record.partitionKey()) + .encryptionType(record.encryptionType()) + .build(); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java index 7cf6cdcf..68f8f7c2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java @@ -16,12 +16,12 @@ package software.amazon.kinesis.retrieval; import java.time.Duration; + import lombok.Data; import lombok.NonNull; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.metrics.MetricsFactory; - /** * Configuration needed for custom data fetchers */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java index 487e1637..a4b255fb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java @@ -25,5 +25,4 @@ public interface RecordsDeliveryAck { * @return id that uniquely determines a record batch and its source. 
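
A usage sketch for the reformatted `KinesisClientRecord#fromRecord` adapter shown above, converting an SDK `Record` into the KCL's record type; the field values are illustrative:

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.kinesis.model.Record;
import software.amazon.kinesis.retrieval.KinesisClientRecord;

class FromRecordExample {
    static KinesisClientRecord adapt() {
        Record sdkRecord = Record.builder()
                .sequenceNumber("49590338271490256608559692538361571095921575989136588898")
                .partitionKey("pk-1")
                .data(SdkBytes.fromUtf8String("payload"))
                .build();
        return KinesisClientRecord.fromRecord(sdkRecord);
    }
}
```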
*/ BatchUniqueIdentifier batchUniqueIdentifier(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java index 328273b2..eb62a98e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java @@ -27,11 +27,14 @@ public interface RecordsFetcherFactory { * @param shardId ShardId of the shard that the fetcher will retrieve records for * @param metricsFactory MetricsFactory used to create metricScope * @param maxRecords Max number of records to be returned in a single get call - * + * * @return RecordsPublisher used to get records from Kinesis. */ - RecordsPublisher createRecordsFetcher(GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, String shardId, - MetricsFactory metricsFactory, int maxRecords); + RecordsPublisher createRecordsFetcher( + GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + String shardId, + MetricsFactory metricsFactory, + int maxRecords); /** * Sets the maximum number of ProcessRecordsInput objects the RecordsPublisher can hold, before further requests are @@ -82,5 +85,4 @@ public interface RecordsFetcherFactory { void idleMillisBetweenCalls(long idleMillisBetweenCalls); long idleMillisBetweenCalls(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java index 98c0375e..60507c25 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java @@ -16,7 +16,6 @@ package software.amazon.kinesis.retrieval; import org.reactivestreams.Publisher; - import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.RequestDetails; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; @@ -35,7 +34,9 @@ public interface RecordsPublisher extends Publisher { * @param initialPositionInStreamExtended * if there is no sequence number the initial position to use */ - void start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended); + void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended); /** * Restart from the last accepted and processed diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java index f6f5bb7f..d2a3ddc2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java @@ -20,7 +20,7 @@ public interface RecordsRetrieved { /** * Retrieves the records that have been received via one of the publishers - * + * * @return the processRecordsInput received */ ProcessRecordsInput processRecordsInput(); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java index f45fa80d..fdd6c445 
100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java @@ -22,6 +22,7 @@ import lombok.NonNull; import lombok.Setter; import lombok.ToString; import lombok.experimental.Accessors; +import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.utils.Either; import software.amazon.kinesis.common.DeprecationUtils; @@ -33,7 +34,6 @@ import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; import software.amazon.kinesis.retrieval.fanout.FanOutConfig; -import software.amazon.kinesis.retrieval.polling.PollingConfig; /** * Used by the KCL to configure the retrieval of records from Kinesis. @@ -49,7 +49,7 @@ public class RetrievalConfig { */ public static final String KINESIS_CLIENT_LIB_USER_AGENT = "amazon-kinesis-client-library-java"; - public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.4.6-SNAPSHOT"; + public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.6.1-SNAPSHOT"; /** * Client used to make calls to Kinesis for records retrieval @@ -111,28 +111,40 @@ public class RetrievalConfig { * @see StreamTracker#createStreamConfig(StreamIdentifier) */ @Deprecated - private InitialPositionInStreamExtended initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); + private InitialPositionInStreamExtended initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); private RetrievalSpecificConfig retrievalSpecificConfig; private RetrievalFactory retrievalFactory; - public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull String streamName, - @NonNull String applicationName) { + public RetrievalConfig( + @NonNull KinesisAsyncClient kinesisAsyncClient, + @NonNull String streamName, + @NonNull String applicationName) { this(kinesisAsyncClient, new SingleStreamTracker(streamName), applicationName); } - public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull StreamTracker streamTracker, - @NonNull String applicationName) { + public RetrievalConfig( + @NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull Arn streamArn, @NonNull String applicationName) { + this(kinesisAsyncClient, new SingleStreamTracker(streamArn), applicationName); + } + + public RetrievalConfig( + @NonNull KinesisAsyncClient kinesisAsyncClient, + @NonNull StreamTracker streamTracker, + @NonNull String applicationName) { this.kinesisClient = kinesisAsyncClient; this.streamTracker = streamTracker; this.applicationName = applicationName; - this.appStreamTracker = DeprecationUtils.convert(streamTracker, + this.appStreamTracker = DeprecationUtils.convert( + streamTracker, singleStreamTracker -> singleStreamTracker.streamConfigList().get(0)); } /** + * Convenience method to reconfigure the embedded {@link StreamTracker}, + * but only when not in multi-stream mode. 
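
A sketch of the deprecated single-stream convenience described above; in multi-stream mode it now throws `IllegalArgumentException`, and `StreamTracker#createStreamConfig` is the preferred path. The `retrievalConfig` instance is assumed to have been built for a single stream:

```java
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.retrieval.RetrievalConfig;

class InitialPositionExample {
    static void rewind(RetrievalConfig retrievalConfig) { // assumed single-stream config
        retrievalConfig.initialPositionInStreamExtended(
                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON));
    }
}
```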
* * @param initialPositionInStreamExtended * @@ -141,63 +153,46 @@ public class RetrievalConfig { * @see StreamTracker#createStreamConfig(StreamIdentifier) */ @Deprecated - public RetrievalConfig initialPositionInStreamExtended(InitialPositionInStreamExtended initialPositionInStreamExtended) { - this.appStreamTracker.apply(multiStreamTracker -> { + public RetrievalConfig initialPositionInStreamExtended( + InitialPositionInStreamExtended initialPositionInStreamExtended) { + if (streamTracker().isMultiStream()) { throw new IllegalArgumentException( "Cannot set initialPositionInStreamExtended when multiStreamTracker is set"); - }, sc -> { - final StreamConfig updatedConfig = new StreamConfig(sc.streamIdentifier(), initialPositionInStreamExtended); - streamTracker = new SingleStreamTracker(sc.streamIdentifier(), updatedConfig); - appStreamTracker = Either.right(updatedConfig); - }); + } + + final StreamIdentifier streamIdentifier = getSingleStreamIdentifier(); + final StreamConfig updatedConfig = new StreamConfig(streamIdentifier, initialPositionInStreamExtended); + streamTracker = new SingleStreamTracker(streamIdentifier, updatedConfig); + appStreamTracker = Either.right(updatedConfig); return this; } public RetrievalConfig retrievalSpecificConfig(RetrievalSpecificConfig retrievalSpecificConfig) { + retrievalSpecificConfig.validateState(streamTracker.isMultiStream()); this.retrievalSpecificConfig = retrievalSpecificConfig; - validateFanoutConfig(); - validatePollingConfig(); return this; } public RetrievalFactory retrievalFactory() { if (retrievalFactory == null) { if (retrievalSpecificConfig == null) { - retrievalSpecificConfig = new FanOutConfig(kinesisClient()) - .applicationName(applicationName()); - retrievalSpecificConfig = appStreamTracker.map(multiStreamTracker -> retrievalSpecificConfig, - streamConfig -> ((FanOutConfig) retrievalSpecificConfig).streamName(streamConfig.streamIdentifier().streamName())); + final FanOutConfig fanOutConfig = new FanOutConfig(kinesisClient()).applicationName(applicationName()); + if (!streamTracker.isMultiStream()) { + final String streamName = getSingleStreamIdentifier().streamName(); + fanOutConfig.streamName(streamName); + } + retrievalSpecificConfig(fanOutConfig); } retrievalFactory = retrievalSpecificConfig.retrievalFactory(); } return retrievalFactory; } - private void validateFanoutConfig() { - // If we are in multistream mode and if retrievalSpecificConfig is an instance of FanOutConfig and if consumerArn is set throw exception. 
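
The new `Arn`-based constructor above pairs with `SingleStreamTracker(Arn)`; a wiring sketch with an illustrative ARN and default client construction:

```java
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.retrieval.RetrievalConfig;

class ArnRetrievalExample {
    static RetrievalConfig configure() {
        KinesisAsyncClient kinesis = KinesisAsyncClient.create();
        Arn streamArn = Arn.fromString("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream");
        return new RetrievalConfig(kinesis, streamArn, "my-application");
    }
}
```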
- boolean isFanoutConfig = retrievalSpecificConfig instanceof FanOutConfig; - boolean isInvalidFanoutConfig = isFanoutConfig && appStreamTracker.map( - multiStreamTracker -> ((FanOutConfig) retrievalSpecificConfig).consumerArn() != null - || ((FanOutConfig) retrievalSpecificConfig).streamName() != null, - streamConfig -> streamConfig.streamIdentifier() == null - || streamConfig.streamIdentifier().streamName() == null); - if(isInvalidFanoutConfig) { - throw new IllegalArgumentException( - "Invalid config: Either in multi-stream mode with streamName/consumerArn configured or in single-stream mode with no streamName configured"); - } - } - - private void validatePollingConfig() { - boolean isPollingConfig = retrievalSpecificConfig instanceof PollingConfig; - boolean isInvalidPollingConfig = isPollingConfig && appStreamTracker.map( - multiStreamTracker -> - ((PollingConfig) retrievalSpecificConfig).streamName() != null, - streamConfig -> - streamConfig.streamIdentifier() == null || streamConfig.streamIdentifier().streamName() == null); - - if (isInvalidPollingConfig) { - throw new IllegalArgumentException( - "Invalid config: Either in multi-stream mode with streamName configured or in single-stream mode with no streamName configured"); - } + /** + * Convenience method to return the {@link StreamIdentifier} from a + * single-stream tracker. + */ + private StreamIdentifier getSingleStreamIdentifier() { + return streamTracker.streamConfigList().get(0).streamIdentifier(); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java index 5703e1af..72b75074 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java @@ -23,12 +23,36 @@ import software.amazon.kinesis.metrics.MetricsFactory; * */ public interface RetrievalFactory { - GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(ShardInfo shardInfo, MetricsFactory metricsFactory); + /** + * @deprecated This method was only used by specific implementations of {@link RetrievalFactory} and should not be + * required to be implemented; will be removed in future versions. + */ @Deprecated - RecordsPublisher createGetRecordsCache(ShardInfo shardInfo, MetricsFactory metricsFactory); + default GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy( + ShardInfo shardInfo, MetricsFactory metricsFactory) { + throw new UnsupportedOperationException("This method is deprecated and should not be used."); + } - default RecordsPublisher createGetRecordsCache(ShardInfo shardInfo, StreamConfig streamConfig, MetricsFactory metricsFactory) { + /** + * @deprecated This method is deprecated and will be removed in future versions. + * Please use {@link #createGetRecordsCache(ShardInfo, StreamConfig, MetricsFactory)}. + */ + @Deprecated + default RecordsPublisher createGetRecordsCache(ShardInfo shardInfo, MetricsFactory metricsFactory) { + throw new UnsupportedOperationException("This method is deprecated and should not be used."); + } + + /** + * Creates a {@link RecordsPublisher} instance to retrieve records for the specified shard. + * + * @param shardInfo The {@link ShardInfo} representing the shard for which records are to be retrieved. + * @param streamConfig The {@link StreamConfig} containing details for the stream. 
+ * @param metricsFactory The {@link MetricsFactory} for recording metrics. + * @return A {@link RecordsPublisher} instance for retrieving records from the shard. + */ + default RecordsPublisher createGetRecordsCache( + ShardInfo shardInfo, StreamConfig streamConfig, MetricsFactory metricsFactory) { return createGetRecordsCache(shardInfo, metricsFactory); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java index 30562994..4aa2114a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java @@ -15,9 +15,6 @@ package software.amazon.kinesis.retrieval; -import java.util.function.Function; -import software.amazon.kinesis.retrieval.polling.DataFetcher; - public interface RetrievalSpecificConfig { /** * Creates and returns a retrieval factory for the specific configuration @@ -25,4 +22,22 @@ public interface RetrievalSpecificConfig { * @return a retrieval factory that can create an appropriate retriever */ RetrievalFactory retrievalFactory(); + + /** + * Validates this instance is configured properly. For example, this + * method may validate that the stream name, if one is required, is + * non-null. + *
<p>
    + * If not in a valid state, an informative unchecked Exception -- for + * example, an {@link IllegalArgumentException} -- should be thrown so + * the caller may rectify the misconfiguration. + * + * @param isMultiStream whether state should be validated for multi-stream + * + * @deprecated remove keyword `default` to force implementation-specific behavior + */ + @Deprecated + default void validateState(boolean isMultiStream) { + // TODO convert this to a non-default implementation in a "major" release + } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java index 91192ad3..01890354 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java @@ -14,10 +14,9 @@ */ package software.amazon.kinesis.retrieval; -import org.slf4j.Logger; - import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.slf4j.Logger; @RequiredArgsConstructor @Slf4j @@ -30,15 +29,13 @@ public class ThrottlingReporter { public void throttled() { consecutiveThrottles++; - String message = "Shard '" + shardId + "' has been throttled " - + consecutiveThrottles + " consecutively"; + String message = "Shard '" + shardId + "' has been throttled " + consecutiveThrottles + " consecutively"; if (consecutiveThrottles > maxConsecutiveWarnThrottles) { getLog().error(message); } else { getLog().warn(message); } - } public void success() { @@ -48,5 +45,4 @@ public class ThrottlingReporter { protected Logger getLog() { return log; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java index 9318b996..346f30f4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java @@ -15,13 +15,11 @@ package software.amazon.kinesis.retrieval.fanout; -import org.apache.commons.lang3.ObjectUtils; - import com.google.common.base.Preconditions; - import lombok.Data; import lombok.NonNull; import lombok.experimental.Accessors; +import org.apache.commons.lang3.ObjectUtils; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.retrieval.RetrievalFactory; @@ -80,10 +78,21 @@ public class FanOutConfig implements RetrievalSpecificConfig { */ private long retryBackoffMillis = 1000; - @Override public RetrievalFactory retrievalFactory() { + @Override + public RetrievalFactory retrievalFactory() { return new FanOutRetrievalFactory(kinesisClient, streamName, consumerArn, this::getOrCreateConsumerArn); } + @Override + public void validateState(final boolean isMultiStream) { + if (isMultiStream) { + if ((streamName() != null) || (consumerArn() != null)) { + throw new IllegalArgumentException( + "FanOutConfig must not have streamName/consumerArn configured in multi-stream mode"); + } + } + } + private String getOrCreateConsumerArn(String streamName) { FanOutConsumerRegistration registration = createConsumerRegistration(streamName); try { @@ -95,17 +104,22 @@ public class FanOutConfig implements RetrievalSpecificConfig { private 
FanOutConsumerRegistration createConsumerRegistration(String streamName) { String consumerToCreate = ObjectUtils.firstNonNull(consumerName(), applicationName()); - return createConsumerRegistration(kinesisClient(), + return createConsumerRegistration( + kinesisClient(), Preconditions.checkNotNull(streamName, "streamName must be set for consumer creation"), - Preconditions.checkNotNull(consumerToCreate, - "applicationName or consumerName must be set for consumer creation")); - + Preconditions.checkNotNull( + consumerToCreate, "applicationName or consumerName must be set for consumer creation")); } - protected FanOutConsumerRegistration createConsumerRegistration(KinesisAsyncClient client, String stream, - String consumerToCreate) { - return new FanOutConsumerRegistration(client, stream, consumerToCreate, maxDescribeStreamSummaryRetries(), - maxDescribeStreamConsumerRetries(), registerStreamConsumerRetries(), retryBackoffMillis()); + protected FanOutConsumerRegistration createConsumerRegistration( + KinesisAsyncClient client, String stream, String consumerToCreate) { + return new FanOutConsumerRegistration( + client, + stream, + consumerToCreate, + maxDescribeStreamSummaryRetries(), + maxDescribeStreamConsumerRetries(), + registerStreamConsumerRetries(), + retryBackoffMillis()); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java index 9bcdd83c..eaf699a3 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java @@ -17,14 +17,13 @@ package software.amazon.kinesis.retrieval.fanout; import java.util.concurrent.ExecutionException; -import org.apache.commons.lang3.StringUtils; - import lombok.AccessLevel; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.Setter; import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; @@ -52,15 +51,19 @@ import software.amazon.kinesis.retrieval.ConsumerRegistration; public class FanOutConsumerRegistration implements ConsumerRegistration { @NonNull private final KinesisAsyncClient kinesisClient; + private final String streamName; + @NonNull private final String streamConsumerName; + private final int maxDescribeStreamSummaryRetries; private final int maxDescribeStreamConsumerRetries; private final int registerStreamConsumerRetries; private final long retryBackoffMillis; private String streamArn; + @Setter(AccessLevel.PRIVATE) private String streamConsumerArn; @@ -104,7 +107,9 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { } } catch (ResourceInUseException e) { // Consumer is present, call DescribeStreamConsumer - log.debug("{} : Got ResourceInUseException consumer exists, will call DescribeStreamConsumer again.", streamName); + log.debug( + "{} : Got ResourceInUseException consumer exists, will call DescribeStreamConsumer again.", + streamName); response = describeStreamConsumer(); } } @@ -123,9 +128,10 @@ public class FanOutConsumerRegistration implements 
ConsumerRegistration { private RegisterStreamConsumerResponse registerStreamConsumer() throws DependencyException { final AWSExceptionManager exceptionManager = createExceptionManager(); try { - final RegisterStreamConsumerRequest request = KinesisRequestsBuilder - .registerStreamConsumerRequestBuilder().streamARN(streamArn()) - .consumerName(streamConsumerName).build(); + final RegisterStreamConsumerRequest request = KinesisRequestsBuilder.registerStreamConsumerRequestBuilder() + .streamARN(streamArn()) + .consumerName(streamConsumerName) + .build(); return kinesisClient.registerStreamConsumer(request).get(); } catch (ExecutionException e) { throw exceptionManager.apply(e.getCause()); @@ -135,18 +141,21 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { } private DescribeStreamConsumerResponse describeStreamConsumer() throws DependencyException { - final DescribeStreamConsumerRequest.Builder requestBuilder = KinesisRequestsBuilder - .describeStreamConsumerRequestBuilder(); + final DescribeStreamConsumerRequest.Builder requestBuilder = + KinesisRequestsBuilder.describeStreamConsumerRequestBuilder(); final DescribeStreamConsumerRequest request; if (StringUtils.isEmpty(streamConsumerArn)) { - request = requestBuilder.streamARN(streamArn()).consumerName(streamConsumerName).build(); + request = requestBuilder + .streamARN(streamArn()) + .consumerName(streamConsumerName) + .build(); } else { request = requestBuilder.consumerARN(streamConsumerArn).build(); } - final ServiceCallerSupplier dsc = () -> kinesisClient - .describeStreamConsumer(request).get(); + final ServiceCallerSupplier dsc = + () -> kinesisClient.describeStreamConsumer(request).get(); return retryWhenThrottled(dsc, maxDescribeStreamConsumerRetries, "DescribeStreamConsumer"); } @@ -178,10 +187,14 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { private String streamArn() throws DependencyException { if (StringUtils.isEmpty(streamArn)) { - final DescribeStreamSummaryRequest request = KinesisRequestsBuilder - .describeStreamSummaryRequestBuilder().streamName(streamName).build(); - final ServiceCallerSupplier dss = () -> kinesisClient.describeStreamSummary(request).get() - .streamDescriptionSummary().streamARN(); + final DescribeStreamSummaryRequest request = KinesisRequestsBuilder.describeStreamSummaryRequestBuilder() + .streamName(streamName) + .build(); + final ServiceCallerSupplier dss = () -> kinesisClient + .describeStreamSummary(request) + .get() + .streamDescriptionSummary() + .streamARN(); streamArn = retryWhenThrottled(dss, maxDescribeStreamSummaryRetries, "DescribeStreamSummary"); } @@ -194,8 +207,9 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { T get() throws ExecutionException, InterruptedException; } - private T retryWhenThrottled(@NonNull final ServiceCallerSupplier retriever, final int maxRetries, - @NonNull final String apiName) throws DependencyException { + private T retryWhenThrottled( + @NonNull final ServiceCallerSupplier retriever, final int maxRetries, @NonNull final String apiName) + throws DependencyException { final AWSExceptionManager exceptionManager = createExceptionManager(); LimitExceededException finalException = null; @@ -223,8 +237,8 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { } if (finalException == null) { - throw new IllegalStateException( - String.format("%s : Finished all retries and no exception was caught while calling %s", streamName, apiName)); + throw new 
IllegalStateException(String.format( + "%s : Finished all retries and no exception was caught while calling %s", streamName, apiName)); } throw finalException; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java index 8404925d..3206e759 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java @@ -15,6 +15,15 @@ package software.amazon.kinesis.retrieval.fanout; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + import com.google.common.annotations.VisibleForTesting; import lombok.AccessLevel; import lombok.Data; @@ -27,14 +36,12 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponse; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler; -import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.awssdk.utils.Either; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.InitialPositionInStreamExtended; @@ -51,23 +58,14 @@ import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.time.Instant; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - import static software.amazon.kinesis.common.DiagnosticUtils.takeDelayedDeliveryActionIfRequired; import static software.amazon.kinesis.retrieval.DataRetrievalUtil.isValidResult; @Slf4j @KinesisClientInternalApi public class FanOutRecordsPublisher implements RecordsPublisher { - private static final ThrowableCategory ACQUIRE_TIMEOUT_CATEGORY = new ThrowableCategory( - ThrowableType.ACQUIRE_TIMEOUT); + private static final ThrowableCategory ACQUIRE_TIMEOUT_CATEGORY = + new ThrowableCategory(ThrowableType.ACQUIRE_TIMEOUT); private static final ThrowableCategory READ_TIMEOUT_CATEGORY = new ThrowableCategory(ThrowableType.READ_TIMEOUT); // Max burst of 10 payload events + 1 terminal event (onError/onComplete) from the service. 
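The MAX_EVENT_BURST_FROM_SERVICE field that follows sizes recordsDeliveryQueue to this burst. A simplified stand-in, with String events in place of the publisher's RecordsRetrievedContext, showing the bounded-queue behavior the class relies on:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class DeliveryQueueSketch {
        public static void main(String[] args) {
            // Capacity covers a max burst of 10 payload events + 1 terminal event.
            BlockingQueue<String> queue = new LinkedBlockingQueue<>(10 + 1);
            // add() throws IllegalStateException when full (instead of blocking),
            // which lets the publisher fail fast on overflow.
            queue.add("payload-event");
            // An ack is honored only when it matches the head of the queue:
            if ("payload-event".equals(queue.peek())) {
                queue.poll(); // delivered in order; safe to evict
            }
        }
    }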
private static final int MAX_EVENT_BURST_FROM_SERVICE = 10 + 1; @@ -80,16 +78,19 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private final AtomicInteger subscribeToShardId = new AtomicInteger(0); private RecordFlow flow; - @Getter @VisibleForTesting + + @Getter + @VisibleForTesting private String currentSequenceNumber; + private InitialPositionInStreamExtended initialPositionInStreamExtended; private boolean isFirstConnection = true; private Subscriber subscriber; private long availableQueueSpace = 0; - private BlockingQueue recordsDeliveryQueue = new LinkedBlockingQueue<>( - MAX_EVENT_BURST_FROM_SERVICE); + private BlockingQueue recordsDeliveryQueue = + new LinkedBlockingQueue<>(MAX_EVENT_BURST_FROM_SERVICE); private RequestDetails lastSuccessfulRequestDetails = new RequestDetails(); @@ -100,7 +101,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher { this.streamAndShardId = shardId; } - public FanOutRecordsPublisher(KinesisAsyncClient kinesis, String shardId, String consumerArn, String streamIdentifierSer) { + public FanOutRecordsPublisher( + KinesisAsyncClient kinesis, String shardId, String consumerArn, String streamIdentifierSer) { this.kinesis = kinesis; this.shardId = shardId; this.consumerArn = consumerArn; @@ -108,16 +110,19 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, + public void start( + ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { synchronized (lockObject) { - log.debug("[{}] Initializing Publisher @ Sequence: {} -- Initial Position: {}", streamAndShardId, - extendedSequenceNumber, initialPositionInStreamExtended); + log.debug( + "[{}] Initializing Publisher @ Sequence: {} -- Initial Position: {}", + streamAndShardId, + extendedSequenceNumber, + initialPositionInStreamExtended); this.initialPositionInStreamExtended = initialPositionInStreamExtended; this.currentSequenceNumber = extendedSequenceNumber.sequenceNumber(); this.isFirstConnection = true; } - } @Override @@ -181,18 +186,18 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // RecordFlow of the current event that needs to be returned RecordFlow flowToBeReturned = null; - final RecordsRetrieved recordsRetrieved = recordsRetrievedContext != null ? - recordsRetrievedContext.getRecordsRetrieved() : null; + final RecordsRetrieved recordsRetrieved = + recordsRetrievedContext != null ? recordsRetrievedContext.getRecordsRetrieved() : null; // Check if the ack corresponds to the head of the delivery queue. - if (recordsRetrieved != null && recordsRetrieved.batchUniqueIdentifier() - .equals(recordsDeliveryAck.batchUniqueIdentifier())) { + if (recordsRetrieved != null + && recordsRetrieved.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) { // It is now safe to remove the element recordsDeliveryQueue.poll(); // Take action based on the time spent by the event in queue. takeDelayedDeliveryActionIfRequired(streamAndShardId, recordsRetrievedContext.getEnqueueTimestamp(), log); // Update current sequence number for the successfully delivered event. - currentSequenceNumber = ((FanoutRecordsRetrieved)recordsRetrieved).continuationSequenceNumber(); + currentSequenceNumber = ((FanoutRecordsRetrieved) recordsRetrieved).continuationSequenceNumber(); // Update the triggering flow for post scheduling upstream request. 
flowToBeReturned = recordsRetrievedContext.getRecordFlow(); // Try scheduling the next event in the queue or execute the subscription shutdown action. @@ -203,15 +208,22 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // Check if the mismatched event belongs to active flow. If publisher receives an ack for a // missing event in active flow, then it means the event was already acked or cleared // from the queue due to a potential bug. - if (flow != null && recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier() - .equals(flow.getSubscribeToShardId())) { + if (flow != null + && recordsDeliveryAck + .batchUniqueIdentifier() + .getFlowIdentifier() + .equals(flow.getSubscribeToShardId())) { log.error( - "{}: Received unexpected ack for the active subscription {}. Throwing. ", streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()); + "{}: Received unexpected ack for the active subscription {}. Throwing.", + streamAndShardId, + recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()); throw new IllegalStateException("Unexpected ack for the active subscription"); } // Otherwise publisher received a stale ack. else { - log.info("{}: Publisher received an ack for stale subscription {}. Ignoring.", streamAndShardId, + log.info( + "{}: Publisher received an ack for stale subscription {}. Ignoring.", + streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()); } } @@ -232,9 +244,13 @@ public class FanOutRecordsPublisher implements RecordsPublisher { subscriber.onNext(recordsRetrieved); } } catch (IllegalStateException e) { - - log.warn("{}: Unable to enqueue the payload due to capacity restrictions in delivery queue with remaining capacity {}. Last successful request details -- {}", - streamAndShardId, recordsDeliveryQueue.remainingCapacity(), lastSuccessfulRequestDetails); + // CHECKSTYLE.OFF: LineLength + log.warn( + "{}: Unable to enqueue the payload due to capacity restrictions in delivery queue with remaining capacity {}. Last successful request details -- {}", + // CHECKSTYLE.ON: LineLength + streamAndShardId, + recordsDeliveryQueue.remainingCapacity(), + lastSuccessfulRequestDetails); throw e; } catch (Throwable t) { log.error("{}: Unable to deliver event to the shard consumer.", streamAndShardId, t); @@ -246,6 +262,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private static final class RecordsRetrievedContext { @Getter(AccessLevel.NONE) private final Either recordsOrShutdownEvent; + private final RecordFlow recordFlow; private final Instant enqueueTimestamp; @@ -255,8 +272,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // This method is not thread-safe. You need to acquire a lock in the caller in order to execute this. 
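For the executeEventAction method below, "acquire a lock in the caller" means every call site holds the publisher's monitor first. A sketch of that convention, using the class's own lockObject and a pending context:

    synchronized (lockObject) {
        // The lock serializes queue mutations and subscriber callbacks,
        // so the non-thread-safe action may run here.
        recordsRetrievedContext.executeEventAction(subscriber);
    }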
void executeEventAction(Subscriber subscriber) { - recordsOrShutdownEvent.apply(recordsEvent -> subscriber.onNext(recordsEvent), - shutdownEvent -> shutdownEvent.getSubscriptionShutdownAction().run()); + recordsOrShutdownEvent.apply(recordsEvent -> subscriber.onNext(recordsEvent), shutdownEvent -> shutdownEvent + .getSubscriptionShutdownAction() + .run()); } } @@ -266,7 +284,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private final String eventIdentifier; private final Throwable shutdownEventThrowableOptional; - SubscriptionShutdownEvent(Runnable subscriptionShutdownAction, String eventIdentifier, Throwable shutdownEventThrowableOptional) { + SubscriptionShutdownEvent( + Runnable subscriptionShutdownAction, String eventIdentifier, Throwable shutdownEventThrowableOptional) { this.subscriptionShutdownAction = subscriptionShutdownAction; this.eventIdentifier = eventIdentifier; this.shutdownEventThrowableOptional = shutdownEventThrowableOptional; @@ -275,7 +294,6 @@ public class FanOutRecordsPublisher implements RecordsPublisher { SubscriptionShutdownEvent(Runnable subscriptionShutdownAction, String eventIdentifier) { this(subscriptionShutdownAction, eventIdentifier, null); } - } private boolean hasValidSubscriber() { @@ -291,10 +309,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // Clear the delivery queue so that any stale entries from previous subscription are discarded. resetRecordsDeliveryStateOnSubscriptionOnInit(); SubscribeToShardRequest.Builder builder = KinesisRequestsBuilder.subscribeToShardRequestBuilder() - .shardId(shardId).consumerARN(consumerArn); + .shardId(shardId) + .consumerARN(consumerArn); SubscribeToShardRequest request; if (isFirstConnection) { - request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStreamExtended).build(); + request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStreamExtended) + .build(); } else { request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStreamExtended) .build(); @@ -305,7 +325,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { String instanceId = shardId + "-" + subscribeInvocationId; log.debug( "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#subscribeToShard) @ {} id: {} -- Starting subscribe to shard", - streamAndShardId, connectionStart, instanceId); + streamAndShardId, + connectionStart, + instanceId); flow = new RecordFlow(this, connectionStart, instanceId); kinesis.subscribeToShard(request, flow); } @@ -313,17 +335,21 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void errorOccurred(RecordFlow triggeringFlow, Throwable t) { synchronized (lockObject) { - if (!hasValidSubscriber()) { - if(hasValidFlow()) { + if (hasValidFlow()) { log.warn( - "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null." + - " Last successful request details -- {}", streamAndShardId, flow.connectionStartedAt, - flow.subscribeToShardId, lastSuccessfulRequestDetails); + "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null." + + " Last successful request details -- {}", + streamAndShardId, + flow.connectionStartedAt, + flow.subscribeToShardId, + lastSuccessfulRequestDetails); } else { log.warn( - "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) -- Subscriber and flow are null." 
+ - " Last successful request details -- {}", streamAndShardId, lastSuccessfulRequestDetails); + "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) -- Subscriber and flow are null." + + " Last successful request details -- {}", + streamAndShardId, + lastSuccessfulRequestDetails); } return; } @@ -334,22 +360,26 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (isActiveFlow(triggeringFlow)) { if (flow != null) { String logMessage = String.format( - "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s." + - " Last successful request details -- %s", streamAndShardId, flow.connectionStartedAt, flow.subscribeToShardId, category.throwableTypeString, lastSuccessfulRequestDetails); + "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s." + + " Last successful request details -- %s", + streamAndShardId, + flow.connectionStartedAt, + flow.subscribeToShardId, + category.throwableTypeString, + lastSuccessfulRequestDetails); switch (category.throwableType) { - case READ_TIMEOUT: - log.debug(logMessage, propagationThrowable); - propagationThrowable = new RetryableRetrievalException(category.throwableTypeString, - (Exception) propagationThrowable.getCause()); - break; - case ACQUIRE_TIMEOUT: - logAcquireTimeoutMessage(t); - // - // Fall through is intentional here as we still want to log the details of the exception - // - default: - log.warn(logMessage, propagationThrowable); - + case READ_TIMEOUT: + log.debug(logMessage, propagationThrowable); + propagationThrowable = new RetryableRetrievalException( + category.throwableTypeString, (Exception) propagationThrowable.getCause()); + break; + case ACQUIRE_TIMEOUT: + logAcquireTimeoutMessage(t); + // + // Fall through is intentional here as we still want to log the details of the exception + // + default: + log.warn(logMessage, propagationThrowable); } flow.cancel(); } @@ -359,21 +389,27 @@ public class FanOutRecordsPublisher implements RecordsPublisher { try { handleFlowError(propagationThrowable, triggeringFlow); } catch (Throwable innerThrowable) { - log.warn("{}: Exception while calling subscriber.onError. Last successful request details -- {}", - streamAndShardId, lastSuccessfulRequestDetails, innerThrowable); + log.warn( + "{}: Exception while calling subscriber.onError. Last successful request details -- {}", + streamAndShardId, + lastSuccessfulRequestDetails, + innerThrowable); } subscriber = null; flow = null; } else { if (triggeringFlow != null) { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- {} -> triggeringFlow wasn't the active flow. Didn't dispatch error", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId, + // CHECKSTYLE.ON: LineLength + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId, category.throwableTypeString); triggeringFlow.cancel(); } } - } } @@ -381,17 +417,21 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void resetRecordsDeliveryStateOnSubscriptionOnInit() { // Clear any lingering records in the queue. if (!recordsDeliveryQueue.isEmpty()) { - log.warn("{}: Found non-empty queue while starting subscription. This indicates unsuccessful clean up of " - + "previous subscription - {}. 
Last successful request details -- {}", streamAndShardId, subscribeToShardId, lastSuccessfulRequestDetails); + log.warn( + "{}: Found non-empty queue while starting subscription. This indicates unsuccessful clean up of " + + "previous subscription - {}. Last successful request details -- {}", + streamAndShardId, + subscribeToShardId, + lastSuccessfulRequestDetails); recordsDeliveryQueue.clear(); } } protected void logAcquireTimeoutMessage(Throwable t) { - log.error("An acquire timeout occurred which usually indicates that the KinesisAsyncClient supplied has a " + - "low maximum streams limit. " + - "Please use the software.amazon.kinesis.common.KinesisClientUtil to setup the client, " + - "or refer to the class to setup the client manually."); + log.error("An acquire timeout occurred which usually indicates that the KinesisAsyncClient supplied has a " + + "low maximum streams limit. " + + "Please use the software.amazon.kinesis.common.KinesisClientUtil to setup the client, " + + "or refer to the class to setup the client manually."); } private void handleFlowError(Throwable t, RecordFlow triggeringFlow) { @@ -402,7 +442,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // The ack received for this onNext event will be ignored by the publisher as the global flow object should // be either null or renewed when the ack's flow identifier is evaluated. FanoutRecordsRetrieved response = new FanoutRecordsRetrieved( - ProcessRecordsInput.builder().records(Collections.emptyList()).isAtShardEnd(true).childShards(Collections.emptyList()).build(), null, + ProcessRecordsInput.builder() + .records(Collections.emptyList()) + .isAtShardEnd(true) + .childShards(Collections.emptyList()) + .build(), + null, triggeringFlow != null ? triggeringFlow.getSubscribeToShardId() : shardId + "-no-flow-found"); subscriber.onNext(response); subscriber.onComplete(); @@ -412,7 +457,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } private enum ThrowableType { - ACQUIRE_TIMEOUT("AcquireTimeout"), READ_TIMEOUT("ReadTimeout"), OTHER("Other"); + ACQUIRE_TIMEOUT("AcquireTimeout"), + READ_TIMEOUT("ReadTimeout"), + OTHER("Other"); String value; @@ -424,6 +471,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private static class ThrowableCategory { @NonNull final ThrowableType throwableType; + @NonNull final String throwableTypeString; @@ -467,7 +515,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (!hasValidSubscriber()) { log.debug( "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Subscriber is null.", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); triggeringFlow.cancel(); if (flow != null) { flow.cancel(); @@ -477,36 +527,45 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (!isActiveFlow(triggeringFlow)) { log.debug( "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Received records for an inactive flow.", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); return; } try { - // If recordBatchEvent is not valid event, RuntimeException will be thrown here and trigger the errorOccurred call. 
+ // If recordBatchEvent is not valid event, RuntimeException will be thrown here and trigger the + // errorOccurred call. // Since the triggeringFlow is active flow, it will then trigger the handleFlowError call. - // Since the exception is not ResourceNotFoundException, it will trigger onError in the ShardConsumerSubscriber. + // Since the exception is not ResourceNotFoundException, it will trigger onError in the + // ShardConsumerSubscriber. // The ShardConsumerSubscriber will finally cancel the subscription. if (!isValidResult(recordBatchEvent.continuationSequenceNumber(), recordBatchEvent.childShards())) { - throw new InvalidStateException("RecordBatchEvent for flow " + triggeringFlow.toString() + " is invalid." - + " event.continuationSequenceNumber: " + recordBatchEvent.continuationSequenceNumber() - + ". event.childShards: " + recordBatchEvent.childShards()); + throw new InvalidStateException("RecordBatchEvent for flow " + triggeringFlow.toString() + + " is invalid." + + " event.continuationSequenceNumber: " + recordBatchEvent.continuationSequenceNumber() + + ". event.childShards: " + recordBatchEvent.childShards()); } - List records = recordBatchEvent.records().stream().map(KinesisClientRecord::fromRecord) - .collect(Collectors.toList()); + List records = recordBatchEvent.records().stream() + .map(KinesisClientRecord::fromRecord) + .collect(Collectors.toList()); ProcessRecordsInput input = ProcessRecordsInput.builder() - .cacheEntryTime(Instant.now()) - .millisBehindLatest(recordBatchEvent.millisBehindLatest()) - .isAtShardEnd(recordBatchEvent.continuationSequenceNumber() == null) - .records(records) - .childShards(recordBatchEvent.childShards()) - .build(); - FanoutRecordsRetrieved recordsRetrieved = new FanoutRecordsRetrieved(input, - recordBatchEvent.continuationSequenceNumber(), triggeringFlow.subscribeToShardId); + .cacheEntryTime(Instant.now()) + .millisBehindLatest(recordBatchEvent.millisBehindLatest()) + .isAtShardEnd(recordBatchEvent.continuationSequenceNumber() == null) + .records(records) + .childShards(recordBatchEvent.childShards()) + .build(); + FanoutRecordsRetrieved recordsRetrieved = new FanoutRecordsRetrieved( + input, recordBatchEvent.continuationSequenceNumber(), triggeringFlow.subscribeToShardId); bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, triggeringFlow); } catch (Throwable t) { - log.warn("{}: Unable to buffer or schedule onNext for subscriber. Failing publisher." + - " Last successful request details -- {}", streamAndShardId, lastSuccessfulRequestDetails); + log.warn( + "{}: Unable to buffer or schedule onNext for subscriber. Failing publisher." 
+ + " Last successful request details -- {}", + streamAndShardId, + lastSuccessfulRequestDetails); errorOccurred(triggeringFlow, t); } } @@ -515,8 +574,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void updateAvailableQueueSpaceAndRequestUpstream(RecordFlow triggeringFlow) { if (availableQueueSpace <= 0) { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Attempted to decrement availableQueueSpace to below 0", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + // CHECKSTYLE.ON: LineLength + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); } else { availableQueueSpace--; if (availableQueueSpace > 0) { @@ -531,21 +594,30 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void onComplete(RecordFlow triggeringFlow) { synchronized (lockObject) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", streamAndShardId, - triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); triggeringFlow.cancel(); if (!hasValidSubscriber()) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", + log.debug( + "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", streamAndShardId, - triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); return; } if (!isActiveFlow(triggeringFlow)) { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {} -- Received spurious onComplete from unexpected flow. Ignoring.", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + // CHECKSTYLE.ON: LineLength + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); return; } @@ -603,8 +675,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { synchronized (lockObject) { if (subscriber != s) { log.warn( + // CHECKSTYLE.OFF: LineLength "{}: (FanOutRecordsPublisher/Subscription#request) - Rejected an attempt to request({}), because subscribers don't match. Last successful request details -- {}", - streamAndShardId, n, lastSuccessfulRequestDetails); + // CHECKSTYLE.ON: LineLength + streamAndShardId, + n, + lastSuccessfulRequestDetails); return; } if (flow == null) { @@ -630,20 +706,28 @@ public class FanOutRecordsPublisher implements RecordsPublisher { synchronized (lockObject) { if (subscriber != s) { log.warn( + // CHECKSTYLE.OFF: LineLength "{}: (FanOutRecordsPublisher/Subscription#cancel) - Rejected attempt to cancel subscription, because subscribers don't match. Last successful request details -- {}", - streamAndShardId, lastSuccessfulRequestDetails); + // CHECKSTYLE.ON: LineLength + streamAndShardId, + lastSuccessfulRequestDetails); return; } if (!hasValidSubscriber()) { log.warn( + // CHECKSTYLE.OFF: LineLength "{}: (FanOutRecordsPublisher/Subscription#cancel) - Cancelled called even with an invalid subscriber. 
Last successful request details -- {}", - streamAndShardId, lastSuccessfulRequestDetails); + // CHECKSTYLE.ON: LineLength + streamAndShardId, + lastSuccessfulRequestDetails); } subscriber = null; if (flow != null) { log.debug( "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher/Subscription#cancel) @ {} id: {}", - streamAndShardId, flow.connectionStartedAt, flow.subscribeToShardId); + streamAndShardId, + flow.connectionStartedAt, + flow.subscribeToShardId); flow.cancel(); availableQueueSpace = 0; } @@ -720,7 +804,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private final FanOutRecordsPublisher parent; private final Instant connectionStartedAt; - @Getter @VisibleForTesting + + @Getter + @VisibleForTesting private final String subscribeToShardId; private RecordSubscription subscription; @@ -731,13 +817,18 @@ public class FanOutRecordsPublisher implements RecordsPublisher { @Override public void onEventStream(SdkPublisher publisher) { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- Subscribe", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- Subscribe", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); if (!parent.isActiveFlow(this)) { this.isDisposed = true; log.debug( "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- parent is disposed", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); parent.rejectSubscription(publisher); return; } @@ -745,7 +836,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { try { log.debug( "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- creating record subscription", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); subscription = new RecordSubscription(parent, this, connectionStartedAt, subscribeToShardId); publisher.subscribe(subscription); @@ -756,7 +849,10 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } catch (Throwable t) { log.debug( "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- throwable during record subscription: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, t.getMessage()); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + t.getMessage()); parent.errorOccurred(this, t); } } @@ -764,10 +860,15 @@ public class FanOutRecordsPublisher implements RecordsPublisher { @Override public void responseReceived(SubscribeToShardResponse response) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#responseReceived) @ {} id: {} -- Response received. Request id - {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, response.responseMetadata().requestId()); + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#responseReceived) @ {} id: {} -- Response received. 
Request id - {}", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + response.responseMetadata().requestId()); - final RequestDetails requestDetails = new RequestDetails(response.responseMetadata().requestId(), connectionStartedAt.toString()); + final RequestDetails requestDetails = + new RequestDetails(response.responseMetadata().requestId(), connectionStartedAt.toString()); parent.setLastSuccessfulRequestDetails(requestDetails); } @@ -778,7 +879,12 @@ executeExceptionOccurred(throwable); } else { final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent( - () -> {parent.recordsDeliveryQueue.poll(); executeExceptionOccurred(throwable);}, "onError", throwable); + () -> { + parent.recordsDeliveryQueue.poll(); + executeExceptionOccurred(throwable); + }, + "onError", + throwable); tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent); } } @@ -786,14 +892,22 @@ private void executeExceptionOccurred(Throwable throwable) { synchronized (parent.lockObject) { - - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + throwable.getClass().getName(), throwable.getMessage()); if (this.isDisposed) { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- This flow has been disposed, not dispatching error. {}: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + throwable.getClass().getName(), throwable.getMessage()); this.isErrorDispatched = true; } @@ -803,8 +917,13 @@ isErrorDispatched = true; } else { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- An error has previously been dispatched, not dispatching this error {}: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + throwable.getClass().getName(), throwable.getMessage()); } } @@ -817,7 +936,11 @@ executeComplete(); } else { final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent( - () -> {parent.recordsDeliveryQueue.poll(); executeComplete();}, "onComplete"); + () -> { + parent.recordsDeliveryQueue.poll(); + executeComplete(); + }, + "onComplete"); tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent); } } @@ -826,20 +949,28 @@ // This method is not thread safe.
This needs to be executed after acquiring lock on parent.lockObject private void tryEnqueueSubscriptionShutdownEvent(SubscriptionShutdownEvent subscriptionShutdownEvent) { try { - parent.recordsDeliveryQueue - .add(new RecordsRetrievedContext(Either.right(subscriptionShutdownEvent), this, Instant.now())); + parent.recordsDeliveryQueue.add( + new RecordsRetrievedContext(Either.right(subscriptionShutdownEvent), this, Instant.now())); } catch (Exception e) { log.warn( + // CHECKSTYLE.OFF: LineLength "{}: Unable to enqueue the {} shutdown event due to capacity restrictions in delivery queue with remaining capacity {}. Ignoring. Last successful request details -- {}", - parent.streamAndShardId, subscriptionShutdownEvent.getEventIdentifier(), parent.recordsDeliveryQueue.remainingCapacity(), - parent.lastSuccessfulRequestDetails, subscriptionShutdownEvent.getShutdownEventThrowableOptional()); + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + subscriptionShutdownEvent.getEventIdentifier(), + parent.recordsDeliveryQueue.remainingCapacity(), + parent.lastSuccessfulRequestDetails, + subscriptionShutdownEvent.getShutdownEventThrowableOptional()); } } private void executeComplete() { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Connection completed", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Connection completed", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); if (isCancelled) { // @@ -848,14 +979,21 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // the // subscription, which was cancelled for a reason (usually queue overflow). // - log.warn("{}: complete called on a cancelled subscription. Ignoring completion. Last successful request details -- {}", - parent.streamAndShardId, parent.lastSuccessfulRequestDetails); + log.warn( + "{}: complete called on a cancelled subscription. Ignoring completion. Last successful request details -- {}", + parent.streamAndShardId, + parent.lastSuccessfulRequestDetails); return; } if (this.isDisposed) { log.warn( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- This flow has been disposed not dispatching completion. 
Last successful request details -- {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, parent.lastSuccessfulRequestDetails); + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + parent.lastSuccessfulRequestDetails); return; } @@ -872,8 +1010,14 @@ public class FanOutRecordsPublisher implements RecordsPublisher { subscription.cancel(); } catch (Throwable t) { log.error( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Exception while trying to cancel failed subscription: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, t.getMessage(), t); + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + t.getMessage(), + t); } } } @@ -913,15 +1057,20 @@ public class FanOutRecordsPublisher implements RecordsPublisher { public void cancel() { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- Cancel called", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- Cancel called", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); flow.isCancelled = true; if (subscription != null) { subscription.cancel(); } else { log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- SDK subscription is null", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } } } @@ -934,22 +1083,35 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (flow.shouldSubscriptionCancel()) { if (flow.isCancelled) { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Subscription was cancelled before onSubscribe", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } if (flow.isDisposed) { log.debug( + // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow has been disposed cancelling subscribe", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + // CHECKSTYLE.ON: LineLength + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow requires cancelling", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); cancel(); } log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Outstanding: {} items so requesting an item", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, parent.availableQueueSpace); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + parent.availableQueueSpace); if (parent.availableQueueSpace > 0) { request(1); } @@ -962,7 +1124,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (flow.shouldSubscriptionCancel()) { log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onNext) @ {} id: {} -- RecordFlow requires cancelling", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + 
subscribeToShardId); cancel(); return; } @@ -977,8 +1141,13 @@ public class FanOutRecordsPublisher implements RecordsPublisher { @Override public void onError(Throwable t) { - log.debug("{}: [SubscriptionLifetime]: (RecordSubscription#onError) @ {} id: {} -- {}: {}", parent.streamAndShardId, - connectionStartedAt, subscribeToShardId, t.getClass().getName(), t.getMessage()); + log.debug( + "{}: [SubscriptionLifetime]: (RecordSubscription#onError) @ {} id: {} -- {}: {}", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + t.getClass().getName(), + t.getMessage()); // // We don't propagate the throwable, as the SDK will call @@ -990,7 +1159,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { public void onComplete() { log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onComplete) @ {} id: {} -- Allowing RecordFlow to call onComplete", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java index 35301624..05a42c0a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java @@ -15,6 +15,12 @@ package software.amazon.kinesis.retrieval.fanout; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import javax.annotation.Nullable; + import lombok.NonNull; import lombok.RequiredArgsConstructor; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -23,15 +29,9 @@ import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RetrievalFactory; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; - @RequiredArgsConstructor @KinesisClientInternalApi public class FanOutRetrievalFactory implements RetrievalFactory { @@ -41,38 +41,32 @@ public class FanOutRetrievalFactory implements RetrievalFactory { private final String defaultConsumerArn; private final Function consumerArnCreator; - private Map implicitConsumerArnTracker = new HashMap<>(); + private final Map implicitConsumerArnTracker = new HashMap<>(); @Override - public GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(final ShardInfo shardInfo, - final MetricsFactory metricsFactory) { - return null; - } - - @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, - final StreamConfig streamConfig, - final MetricsFactory metricsFactory) { + public RecordsPublisher createGetRecordsCache( + @NonNull final ShardInfo shardInfo, + @NonNull final StreamConfig streamConfig, + @Nullable final MetricsFactory metricsFactory) { final Optional streamIdentifierStr = shardInfo.streamIdentifierSerOpt(); - if(streamIdentifierStr.isPresent()) { - final StreamIdentifier streamIdentifier = 
StreamIdentifier.multiStreamInstance(streamIdentifierStr.get()); - return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(), - getOrCreateConsumerArn(streamIdentifier, streamConfig.consumerArn()), + if (streamIdentifierStr.isPresent()) { + return new FanOutRecordsPublisher( + kinesisClient, + shardInfo.shardId(), + getOrCreateConsumerArn(streamConfig.streamIdentifier(), streamConfig.consumerArn()), streamIdentifierStr.get()); } else { - final StreamIdentifier streamIdentifier = StreamIdentifier.singleStreamInstance(defaultStreamName); - return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(), - getOrCreateConsumerArn(streamIdentifier, defaultConsumerArn)); + return new FanOutRecordsPublisher( + kinesisClient, + shardInfo.shardId(), + getOrCreateConsumerArn(streamConfig.streamIdentifier(), defaultConsumerArn)); } } - @Override - public RecordsPublisher createGetRecordsCache(ShardInfo shardInfo, MetricsFactory metricsFactory) { - throw new UnsupportedOperationException("FanoutRetrievalFactory needs StreamConfig Info"); - } - private String getOrCreateConsumerArn(StreamIdentifier streamIdentifier, String consumerArn) { - return consumerArn != null ? consumerArn : implicitConsumerArnTracker - .computeIfAbsent(streamIdentifier, sId -> consumerArnCreator.apply(sId.streamName())); + return consumerArn != null + ? consumerArn + : implicitConsumerArnTracker.computeIfAbsent( + streamIdentifier, sId -> consumerArnCreator.apply(sId.streamName())); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java index f48adaa7..4d2ef80a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java @@ -15,5 +15,4 @@ package software.amazon.kinesis.retrieval.fanout; -public class MultipleSubscriberException extends RuntimeException { -} +public class MultipleSubscriberException extends RuntimeException {} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java index 0c1c4a28..fa52ed2a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java @@ -15,23 +15,25 @@ package software.amazon.kinesis.retrieval.kpl; import java.math.BigInteger; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; +import java.util.stream.Collectors; -//import com.amazonaws.services.kinesis.clientlibrary.lib.worker.String; +import lombok.EqualsAndHashCode; import software.amazon.kinesis.checkpoint.SentinelCheckpoint; /** * Represents a two-part sequence number for records aggregated by the Kinesis * Producer Library. - * + * *
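The getOrCreateConsumerArn rewrite in the FanOutRetrievalFactory hunk above folds implicit consumer registration into one expression: an explicitly configured ARN wins, otherwise Map.computeIfAbsent registers a consumer once per stream and reuses the cached ARN on every later call. A minimal sketch of that memoization pattern, with a hypothetical registerConsumer function standing in for the factory's consumerArnCreator:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class ConsumerArnCache {
    private final Map<String, String> arnsByStream = new HashMap<>();
    // Hypothetical stand-in for the factory's consumerArnCreator.
    private final Function<String, String> registerConsumer;

    ConsumerArnCache(Function<String, String> registerConsumer) {
        this.registerConsumer = registerConsumer;
    }

    // Explicitly configured ARN wins; otherwise register once per stream and reuse.
    String getOrCreate(String streamName, String explicitArn) {
        return explicitArn != null
                ? explicitArn
                : arnsByStream.computeIfAbsent(streamName, registerConsumer);
    }
}
```

Note the tracker in the diff is a plain HashMap; if the surrounding code were ever called from multiple threads, a ConcurrentHashMap would be the safer choice, so treat this strictly as a sketch of the single-threaded pattern.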
<p>
    * The KPL combines multiple user records into a single Kinesis record. Each * user record therefore has an integer sub-sequence number, in addition to the * regular sequence number of the Kinesis record. The sub-sequence number is * used to checkpoint within an aggregated record. - * - * @author daphnliu - * */ +@EqualsAndHashCode public class ExtendedSequenceNumber implements Comparable { private final String sequenceNumber; private final long subSequenceNumber; @@ -46,17 +48,17 @@ public class ExtendedSequenceNumber implements Comparable SENTINEL_VALUES = + Collections.unmodifiableSet(Arrays.stream(SentinelCheckpoint.values()) + .map(SentinelCheckpoint::name) + .collect(Collectors.toSet())); + /** * Construct an ExtendedSequenceNumber. The sub-sequence number defaults to * 0. - * + * * @param sequenceNumber * Sequence number of the Kinesis record */ @@ -78,7 +91,7 @@ public class ExtendedSequenceNumber implements Comparable= 0) { - sb.append("SubsequenceNumber: " + subSequenceNumber()); + sb.append("SubsequenceNumber: ").append(subSequenceNumber()); } - sb.append("}"); + sb.append('}'); return sb.toString(); } - - @Override - public int hashCode() { - final int prime = 31; - final int shift = 32; - int hashCode = 1; - hashCode = prime * hashCode + ((sequenceNumber == null) ? 0 : sequenceNumber.hashCode()); - hashCode = prime * hashCode + ((subSequenceNumber < 0) - ? 0 - : (int) (subSequenceNumber ^ (subSequenceNumber >>> shift))); - return hashCode; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - - if (!(obj instanceof ExtendedSequenceNumber)) { - return false; - } - ExtendedSequenceNumber other = (ExtendedSequenceNumber) obj; - - if (!sequenceNumber.equals(other.sequenceNumber())) { - return false; - } - return subSequenceNumber == other.subSequenceNumber(); - } /** * Sequence numbers are converted, sentinels are given a value of -1. Note this method is only used after special * logic associated with SHARD_END and the case of comparing two sentinel values has already passed, so we map * sentinel values LATEST, TRIM_HORIZON and AT_TIMESTAMP to negative numbers so that they are considered less than * sequence numbers. - * + * * @param sequenceNumber The string to convert to big integer value * @return a BigInteger value representation of the sequenceNumber */ @@ -217,36 +197,29 @@ public class ExtendedSequenceNumber implements Comparablerequired string key = 1; - */ - boolean hasKey(); - /** - * required string key = 1; - */ - java.lang.String getKey(); - /** - * required string key = 1; - */ - com.google.protobuf.ByteString - getKeyBytes(); - - /** - * optional string value = 2; - */ - boolean hasValue(); - /** - * optional string value = 2; - */ - java.lang.String getValue(); - /** - * optional string value = 2; - */ - com.google.protobuf.ByteString - getValueBytes(); - } - /** - * Protobuf type {@code Tag} - */ - public static final class Tag extends - com.google.protobuf.GeneratedMessage implements - // @@protoc_insertion_point(message_implements:Tag) - TagOrBuilder { - // Use Tag.newBuilder() to construct. 
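The ExtendedSequenceNumber hunk above builds SENTINEL_VALUES once, at class-initialization time, as an unmodifiable set of SentinelCheckpoint names, so deciding whether a sequence number is a sentinel becomes a constant-time set lookup instead of repeated per-call enum probing (what the replaced check looked like is elided in this excerpt). A self-contained sketch of the same pattern, using a hypothetical Checkpoint enum in place of SentinelCheckpoint:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;

class SentinelCheck {
    // Hypothetical stand-in for SentinelCheckpoint.
    enum Checkpoint { TRIM_HORIZON, LATEST, SHARD_END, AT_TIMESTAMP }

    // Computed once at class load, mirroring the SENTINEL_VALUES field in the diff above.
    private static final Set<String> SENTINEL_VALUES =
            Collections.unmodifiableSet(Arrays.stream(Checkpoint.values())
                    .map(Enum::name)
                    .collect(Collectors.toSet()));

    // O(1) membership test with no exception or reflection overhead
    // for ordinary sequence numbers.
    static boolean isSentinel(String sequenceNumber) {
        return SENTINEL_VALUES.contains(sequenceNumber);
    }

    public static void main(String[] args) {
        System.out.println(isSentinel("SHARD_END"));            // true
        System.out.println(isSentinel("49590338271490256608")); // false
    }
}
```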
- private Tag(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Tag(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Tag defaultInstance; - public static Tag getDefaultInstance() { - return defaultInstance; - } - - public Tag getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Tag( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - key_ = bs; - break; - } - case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - value_ = bs; - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Tag_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_Tag_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Tag.class, Messages.Tag.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Tag parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Tag(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - public static final int KEY_FIELD_NUMBER = 1; - private java.lang.Object key_; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - 
com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int VALUE_FIELD_NUMBER = 2; - private java.lang.Object value_; - /** - * optional string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } - } - /** - * optional string value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - key_ = ""; - value_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (!hasKey()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getKeyBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getValueBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getKeyBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getValueBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static Messages.Tag parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.Tag parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.Tag parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.Tag parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.Tag parseFrom(java.io.InputStream input) - throws java.io.IOException { 
- return PARSER.parseFrom(input); - } - public static Messages.Tag parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static Messages.Tag parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static Messages.Tag parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static Messages.Tag parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.Tag parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(Messages.Tag prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code Tag} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder implements - // @@protoc_insertion_point(builder_implements:Tag) - Messages.TagOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Tag_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_Tag_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Tag.class, Messages.Tag.Builder.class); - } - - // Construct using Messages.Tag.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - key_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return Messages.internal_static_Tag_descriptor; - } - - public Messages.Tag getDefaultInstanceForType() { - return Messages.Tag.getDefaultInstance(); - } - - public Messages.Tag build() { - Messages.Tag result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public Messages.Tag buildPartial() { - Messages.Tag result = new Messages.Tag(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.key_ = key_; - if (((from_bitField0_ & 
0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.value_ = value_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Messages.Tag) { - return mergeFrom((Messages.Tag)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(Messages.Tag other) { - if (other == Messages.Tag.getDefaultInstance()) return this; - if (other.hasKey()) { - bitField0_ |= 0x00000001; - key_ = other.key_; - onChanged(); - } - if (other.hasValue()) { - bitField0_ |= 0x00000002; - value_ = other.value_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasKey()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Messages.Tag parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Messages.Tag) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object key_ = ""; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string key = 1; - */ - public Builder setKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000001); - key_ = getDefaultInstance().getKey(); - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder setKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - - private java.lang.Object value_ = ""; - /** - * optional string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string 
value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string value = 2; - */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - /** - * optional string value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); - onChanged(); - return this; - } - /** - * optional string value = 2; - */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:Tag) - } - - static { - defaultInstance = new Tag(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Tag) - } - - public interface RecordOrBuilder extends - // @@protoc_insertion_point(interface_extends:Record) - com.google.protobuf.MessageOrBuilder { - - /** - * required uint64 partition_key_index = 1; - */ - boolean hasPartitionKeyIndex(); - /** - * required uint64 partition_key_index = 1; - */ - long getPartitionKeyIndex(); - - /** - * optional uint64 explicit_hash_key_index = 2; - */ - boolean hasExplicitHashKeyIndex(); - /** - * optional uint64 explicit_hash_key_index = 2; - */ - long getExplicitHashKeyIndex(); - - /** - * required bytes data = 3; - */ - boolean hasData(); - /** - * required bytes data = 3; - */ - com.google.protobuf.ByteString getData(); - - /** - * repeated .Tag tags = 4; - */ - java.util.List - getTagsList(); - /** - * repeated .Tag tags = 4; - */ - Messages.Tag getTags(int index); - /** - * repeated .Tag tags = 4; - */ - int getTagsCount(); - /** - * repeated .Tag tags = 4; - */ - java.util.List - getTagsOrBuilderList(); - /** - * repeated .Tag tags = 4; - */ - Messages.TagOrBuilder getTagsOrBuilder( - int index); - } - /** - * Protobuf type {@code Record} - */ - public static final class Record extends - com.google.protobuf.GeneratedMessage implements - // @@protoc_insertion_point(message_implements:Record) - RecordOrBuilder { - // Use Record.newBuilder() to construct. 
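The generated Tag message whose deletion completes here illustrates proto2 required-field semantics: key is required, so isInitialized() reports false until it is set, and Builder.build() throws (via newUninitializedMessageException) rather than emit a half-built message. A sketch of that behavior, assuming the Messages classes remain available on the classpath (for example, regenerated from the .proto at build time):

```java
import software.amazon.kinesis.retrieval.kpl.Messages;

public class TagValidationDemo {
    public static void main(String[] args) {
        try {
            // 'key' is a required proto2 field, so build() rejects a Tag without it.
            Messages.Tag.newBuilder().setValue("orphan-value").build();
        } catch (RuntimeException e) {
            System.out.println("rejected: " + e.getMessage());
        }

        Messages.Tag tag = Messages.Tag.newBuilder()
                .setKey("env")
                .setValue("prod")
                .build();
        System.out.println(tag.hasKey() + " " + tag.getKey()); // true env
    }
}
```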
- private Record(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Record(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Record defaultInstance; - public static Record getDefaultInstance() { - return defaultInstance; - } - - public Record getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Record( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - partitionKeyIndex_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - explicitHashKeyIndex_ = input.readUInt64(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - data_ = input.readBytes(); - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - tags_.add(input.readMessage(Messages.Tag.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = java.util.Collections.unmodifiableList(tags_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Record_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_Record_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Record.class, Messages.Record.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Record parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Record(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - public static final int PARTITION_KEY_INDEX_FIELD_NUMBER = 1; - private long partitionKeyIndex_; - /** - * required uint64 partition_key_index = 1; - */ - public boolean hasPartitionKeyIndex() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 partition_key_index = 1; - */ - public long getPartitionKeyIndex() { - return partitionKeyIndex_; - } - - public static final 
int EXPLICIT_HASH_KEY_INDEX_FIELD_NUMBER = 2; - private long explicitHashKeyIndex_; - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public boolean hasExplicitHashKeyIndex() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public long getExplicitHashKeyIndex() { - return explicitHashKeyIndex_; - } - - public static final int DATA_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString data_; - /** - * required bytes data = 3; - */ - public boolean hasData() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes data = 3; - */ - public com.google.protobuf.ByteString getData() { - return data_; - } - - public static final int TAGS_FIELD_NUMBER = 4; - private java.util.List tags_; - /** - * repeated .Tag tags = 4; - */ - public java.util.List getTagsList() { - return tags_; - } - /** - * repeated .Tag tags = 4; - */ - public java.util.List - getTagsOrBuilderList() { - return tags_; - } - /** - * repeated .Tag tags = 4; - */ - public int getTagsCount() { - return tags_.size(); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag getTags(int index) { - return tags_.get(index); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.TagOrBuilder getTagsOrBuilder( - int index) { - return tags_.get(index); - } - - private void initFields() { - partitionKeyIndex_ = 0L; - explicitHashKeyIndex_ = 0L; - data_ = com.google.protobuf.ByteString.EMPTY; - tags_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (!hasPartitionKeyIndex()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasData()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTagsCount(); i++) { - if (!getTags(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, partitionKeyIndex_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, explicitHashKeyIndex_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, data_); - } - for (int i = 0; i < tags_.size(); i++) { - output.writeMessage(4, tags_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, partitionKeyIndex_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, explicitHashKeyIndex_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, data_); - } - for (int i = 0; i < tags_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tags_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected 
java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static Messages.Record parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.Record parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.Record parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.Record parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.Record parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.Record parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static Messages.Record parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static Messages.Record parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static Messages.Record parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.Record parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(Messages.Record prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code Record} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder implements - // @@protoc_insertion_point(builder_implements:Record) - Messages.RecordOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Record_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_Record_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Record.class, Messages.Record.Builder.class); - } - - // Construct using Messages.Record.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTagsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - partitionKeyIndex_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - explicitHashKeyIndex_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - data_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - if (tagsBuilder_ == null) { - tags_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - tagsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return Messages.internal_static_Record_descriptor; - } - - public Messages.Record getDefaultInstanceForType() { - return Messages.Record.getDefaultInstance(); - } - - public Messages.Record build() { - Messages.Record result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public Messages.Record buildPartial() { - Messages.Record result = new Messages.Record(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.partitionKeyIndex_ = partitionKeyIndex_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.explicitHashKeyIndex_ = explicitHashKeyIndex_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.data_ = data_; - if (tagsBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = java.util.Collections.unmodifiableList(tags_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.tags_ = tags_; - } else { - result.tags_ = tagsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Messages.Record) { - return mergeFrom((Messages.Record)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(Messages.Record other) { - if (other == Messages.Record.getDefaultInstance()) return this; - if (other.hasPartitionKeyIndex()) { - setPartitionKeyIndex(other.getPartitionKeyIndex()); - } - if (other.hasExplicitHashKeyIndex()) { - setExplicitHashKeyIndex(other.getExplicitHashKeyIndex()); - } - if (other.hasData()) { - setData(other.getData()); - } - if (tagsBuilder_ == null) { - if (!other.tags_.isEmpty()) { - if (tags_.isEmpty()) { - tags_ = other.tags_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureTagsIsMutable(); - tags_.addAll(other.tags_); - } - onChanged(); - } - } else { - if (!other.tags_.isEmpty()) { - if (tagsBuilder_.isEmpty()) { - tagsBuilder_.dispose(); - tagsBuilder_ = null; - tags_ = other.tags_; - bitField0_ = (bitField0_ & ~0x00000008); - tagsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getTagsFieldBuilder() : null; - } else { - tagsBuilder_.addAllMessages(other.tags_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasPartitionKeyIndex()) { - - return false; - } - if (!hasData()) { - - return false; - } - for (int i = 0; i < getTagsCount(); i++) { - if (!getTags(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Messages.Record parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Messages.Record) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private long partitionKeyIndex_ ; - /** - * required uint64 partition_key_index = 1; - */ - public boolean hasPartitionKeyIndex() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 partition_key_index = 1; - */ - public long getPartitionKeyIndex() { - return partitionKeyIndex_; - } - /** - * required uint64 partition_key_index = 1; - */ - public Builder setPartitionKeyIndex(long value) { - bitField0_ |= 0x00000001; - partitionKeyIndex_ = value; - onChanged(); - return this; - } - /** - * required uint64 partition_key_index = 1; - */ - public Builder clearPartitionKeyIndex() { - bitField0_ = (bitField0_ & ~0x00000001); - partitionKeyIndex_ = 0L; - onChanged(); - return this; - } - - private long explicitHashKeyIndex_ ; - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public boolean hasExplicitHashKeyIndex() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public long getExplicitHashKeyIndex() { - return explicitHashKeyIndex_; - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public Builder setExplicitHashKeyIndex(long value) { - bitField0_ |= 0x00000002; - explicitHashKeyIndex_ = value; - onChanged(); - return this; - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public Builder clearExplicitHashKeyIndex() { - bitField0_ = (bitField0_ & ~0x00000002); - explicitHashKeyIndex_ = 0L; - onChanged(); - return this; - } - - private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes data = 3; - */ - public boolean hasData() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes data = 3; - */ - public com.google.protobuf.ByteString getData() { - return data_; - } - /** - * required bytes data = 3; - */ - public Builder setData(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - data_ = value; - onChanged(); - return this; - } - /** - * required bytes data = 3; - */ - public Builder clearData() { - bitField0_ = (bitField0_ & ~0x00000004); - data_ = getDefaultInstance().getData(); - onChanged(); - return this; - } - - private java.util.List tags_ = - java.util.Collections.emptyList(); - private void ensureTagsIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = new java.util.ArrayList(tags_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Tag, 
Messages.Tag.Builder, Messages.TagOrBuilder> tagsBuilder_; - - /** - * repeated .Tag tags = 4; - */ - public java.util.List getTagsList() { - if (tagsBuilder_ == null) { - return java.util.Collections.unmodifiableList(tags_); - } else { - return tagsBuilder_.getMessageList(); - } - } - /** - * repeated .Tag tags = 4; - */ - public int getTagsCount() { - if (tagsBuilder_ == null) { - return tags_.size(); - } else { - return tagsBuilder_.getCount(); - } - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag getTags(int index) { - if (tagsBuilder_ == null) { - return tags_.get(index); - } else { - return tagsBuilder_.getMessage(index); - } - } - /** - * repeated .Tag tags = 4; - */ - public Builder setTags( - int index, Messages.Tag value) { - if (tagsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTagsIsMutable(); - tags_.set(index, value); - onChanged(); - } else { - tagsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder setTags( - int index, Messages.Tag.Builder builderForValue) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.set(index, builderForValue.build()); - onChanged(); - } else { - tagsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags(Messages.Tag value) { - if (tagsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTagsIsMutable(); - tags_.add(value); - onChanged(); - } else { - tagsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags( - int index, Messages.Tag value) { - if (tagsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTagsIsMutable(); - tags_.add(index, value); - onChanged(); - } else { - tagsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags( - Messages.Tag.Builder builderForValue) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.add(builderForValue.build()); - onChanged(); - } else { - tagsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags( - int index, Messages.Tag.Builder builderForValue) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.add(index, builderForValue.build()); - onChanged(); - } else { - tagsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addAllTags( - java.lang.Iterable values) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, tags_); - onChanged(); - } else { - tagsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder clearTags() { - if (tagsBuilder_ == null) { - tags_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - tagsBuilder_.clear(); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder removeTags(int index) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.remove(index); - onChanged(); - } else { - tagsBuilder_.remove(index); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag.Builder getTagsBuilder( - int index) { - return getTagsFieldBuilder().getBuilder(index); 
- } - /** - * repeated .Tag tags = 4; - */ - public Messages.TagOrBuilder getTagsOrBuilder( - int index) { - if (tagsBuilder_ == null) { - return tags_.get(index); } else { - return tagsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .Tag tags = 4; - */ - public java.util.List - getTagsOrBuilderList() { - if (tagsBuilder_ != null) { - return tagsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tags_); - } - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag.Builder addTagsBuilder() { - return getTagsFieldBuilder().addBuilder( - Messages.Tag.getDefaultInstance()); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag.Builder addTagsBuilder( - int index) { - return getTagsFieldBuilder().addBuilder( - index, Messages.Tag.getDefaultInstance()); - } - /** - * repeated .Tag tags = 4; - */ - public java.util.List - getTagsBuilderList() { - return getTagsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Tag, Messages.Tag.Builder, Messages.TagOrBuilder> - getTagsFieldBuilder() { - if (tagsBuilder_ == null) { - tagsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - Messages.Tag, Messages.Tag.Builder, Messages.TagOrBuilder>( - tags_, - ((bitField0_ & 0x00000008) == 0x00000008), - getParentForChildren(), - isClean()); - tags_ = null; - } - return tagsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:Record) - } - - static { - defaultInstance = new Record(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Record) - } - - public interface AggregatedRecordOrBuilder extends - // @@protoc_insertion_point(interface_extends:AggregatedRecord) - com.google.protobuf.MessageOrBuilder { - - /** - * repeated string partition_key_table = 1; - */ - com.google.protobuf.ProtocolStringList - getPartitionKeyTableList(); - /** - * repeated string partition_key_table = 1; - */ - int getPartitionKeyTableCount(); - /** - * repeated string partition_key_table = 1; - */ - java.lang.String getPartitionKeyTable(int index); - /** - * repeated string partition_key_table = 1; - */ - com.google.protobuf.ByteString - getPartitionKeyTableBytes(int index); - - /** - * repeated string explicit_hash_key_table = 2; - */ - com.google.protobuf.ProtocolStringList - getExplicitHashKeyTableList(); - /** - * repeated string explicit_hash_key_table = 2; - */ - int getExplicitHashKeyTableCount(); - /** - * repeated string explicit_hash_key_table = 2; - */ - java.lang.String getExplicitHashKeyTable(int index); - /** - * repeated string explicit_hash_key_table = 2; - */ - com.google.protobuf.ByteString - getExplicitHashKeyTableBytes(int index); - - /** - * repeated .Record records = 3; - */ - java.util.List - getRecordsList(); - /** - * repeated .Record records = 3; - */ - Messages.Record getRecords(int index); - /** - * repeated .Record records = 3; - */ - int getRecordsCount(); - /** - * repeated .Record records = 3; - */ - java.util.List - getRecordsOrBuilderList(); - /** - * repeated .Record records = 3; - */ - Messages.RecordOrBuilder getRecordsOrBuilder( - int index); - } - /** - * Protobuf type {@code AggregatedRecord} - */ - public static final class AggregatedRecord extends - com.google.protobuf.GeneratedMessage implements - // @@protoc_insertion_point(message_implements:AggregatedRecord) - AggregatedRecordOrBuilder { - // Use AggregatedRecord.newBuilder() to construct. 
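Together, Tag, Record, and the AggregatedRecord container whose definition begins here encode KPL aggregation: each Record carries integer indexes into shared partition-key and explicit-hash-key tables rather than repeating the key strings per record. A sketch of assembling one message with these generated builders (addPartitionKeyTable and addRecords are the standard generated adders for the repeated fields; they sit outside this excerpt, so treat them as an assumption):

```java
import com.google.protobuf.ByteString;
import software.amazon.kinesis.retrieval.kpl.Messages;

public class AggregationDemo {
    public static void main(String[] args) {
        Messages.Record record = Messages.Record.newBuilder()
                .setPartitionKeyIndex(0) // index into the shared partition-key table
                .setData(ByteString.copyFromUtf8("payload-1"))
                .addTags(Messages.Tag.newBuilder().setKey("env").setValue("prod"))
                .build();

        Messages.AggregatedRecord aggregated = Messages.AggregatedRecord.newBuilder()
                .addPartitionKeyTable("user-42") // entry 0, shared by any record that references it
                .addRecords(record)
                .build();

        System.out.println(aggregated.getRecordsCount()); // 1
    }
}
```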
- private AggregatedRecord(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private AggregatedRecord(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final AggregatedRecord defaultInstance; - public static AggregatedRecord getDefaultInstance() { - return defaultInstance; - } - - public AggregatedRecord getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private AggregatedRecord( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - partitionKeyTable_.add(bs); - break; - } - case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000002; - } - explicitHashKeyTable_.add(bs); - break; - } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - records_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - records_.add(input.readMessage(Messages.Record.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = partitionKeyTable_.getUnmodifiableView(); - } - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = explicitHashKeyTable_.getUnmodifiableView(); - } - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - records_ = java.util.Collections.unmodifiableList(records_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_AggregatedRecord_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_AggregatedRecord_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.AggregatedRecord.class, Messages.AggregatedRecord.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AggregatedRecord parsePartialFrom( - com.google.protobuf.CodedInputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AggregatedRecord(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public static final int PARTITION_KEY_TABLE_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList partitionKeyTable_; - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ProtocolStringList - getPartitionKeyTableList() { - return partitionKeyTable_; - } - /** - * repeated string partition_key_table = 1; - */ - public int getPartitionKeyTableCount() { - return partitionKeyTable_.size(); - } - /** - * repeated string partition_key_table = 1; - */ - public java.lang.String getPartitionKeyTable(int index) { - return partitionKeyTable_.get(index); - } - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ByteString - getPartitionKeyTableBytes(int index) { - return partitionKeyTable_.getByteString(index); - } - - public static final int EXPLICIT_HASH_KEY_TABLE_FIELD_NUMBER = 2; - private com.google.protobuf.LazyStringList explicitHashKeyTable_; - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ProtocolStringList - getExplicitHashKeyTableList() { - return explicitHashKeyTable_; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public int getExplicitHashKeyTableCount() { - return explicitHashKeyTable_.size(); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public java.lang.String getExplicitHashKeyTable(int index) { - return explicitHashKeyTable_.get(index); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ByteString - getExplicitHashKeyTableBytes(int index) { - return explicitHashKeyTable_.getByteString(index); - } - - public static final int RECORDS_FIELD_NUMBER = 3; - private java.util.List records_; - /** - * repeated .Record records = 3; - */ - public java.util.List getRecordsList() { - return records_; - } - /** - * repeated .Record records = 3; - */ - public java.util.List - getRecordsOrBuilderList() { - return records_; - } - /** - * repeated .Record records = 3; - */ - public int getRecordsCount() { - return records_.size(); - } - /** - * repeated .Record records = 3; - */ - public Messages.Record getRecords(int index) { - return records_.get(index); - } - /** - * repeated .Record records = 3; - */ - public Messages.RecordOrBuilder getRecordsOrBuilder( - int index) { - return records_.get(index); - } - - private void initFields() { - partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - records_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - for (int i = 0; i < getRecordsCount(); i++) { - if (!getRecords(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < partitionKeyTable_.size(); i++) { - output.writeBytes(1, partitionKeyTable_.getByteString(i)); - } - for (int i = 0; i < explicitHashKeyTable_.size(); i++) { 
- output.writeBytes(2, explicitHashKeyTable_.getByteString(i)); - } - for (int i = 0; i < records_.size(); i++) { - output.writeMessage(3, records_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < partitionKeyTable_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(partitionKeyTable_.getByteString(i)); - } - size += dataSize; - size += 1 * getPartitionKeyTableList().size(); - } - { - int dataSize = 0; - for (int i = 0; i < explicitHashKeyTable_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(explicitHashKeyTable_.getByteString(i)); - } - size += dataSize; - size += 1 * getExplicitHashKeyTableList().size(); - } - for (int i = 0; i < records_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, records_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.AggregatedRecord parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.AggregatedRecord parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.AggregatedRecord parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.AggregatedRecord parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static Messages.AggregatedRecord parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static Messages.AggregatedRecord parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder 
newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(Messages.AggregatedRecord prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code AggregatedRecord} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder implements - // @@protoc_insertion_point(builder_implements:AggregatedRecord) - Messages.AggregatedRecordOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_AggregatedRecord_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_AggregatedRecord_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.AggregatedRecord.class, Messages.AggregatedRecord.Builder.class); - } - - // Construct using Messages.AggregatedRecord.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRecordsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - if (recordsBuilder_ == null) { - records_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - recordsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return Messages.internal_static_AggregatedRecord_descriptor; - } - - public Messages.AggregatedRecord getDefaultInstanceForType() { - return Messages.AggregatedRecord.getDefaultInstance(); - } - - public Messages.AggregatedRecord build() { - Messages.AggregatedRecord result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public Messages.AggregatedRecord buildPartial() { - Messages.AggregatedRecord result = new Messages.AggregatedRecord(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = partitionKeyTable_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.partitionKeyTable_ = partitionKeyTable_; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = explicitHashKeyTable_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.explicitHashKeyTable_ = explicitHashKeyTable_; - if (recordsBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - records_ = java.util.Collections.unmodifiableList(records_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.records_ = records_; - } else { - result.records_ = recordsBuilder_.build(); - } - onBuilt(); - return result; - } - - 
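The builder and parser methods being deleted above are the public surface of the KPL aggregation payload: a string table of partition keys (and optional explicit hash keys) plus repeated Record entries that index into those tables. A minimal round-trip sketch of that generated API, assuming the generated software.amazon.kinesis.retrieval.kpl.Messages class is on the classpath; the field values and the enclosing method are illustrative, and this covers only the protobuf payload, not the KPL wire framing around it:

    import com.google.protobuf.ByteString;
    import com.google.protobuf.InvalidProtocolBufferException;
    import software.amazon.kinesis.retrieval.kpl.Messages;

    static Messages.AggregatedRecord roundTrip() throws InvalidProtocolBufferException {
        // One table entry; the record points at it via partition_key_index = 0.
        Messages.AggregatedRecord aggregated = Messages.AggregatedRecord.newBuilder()
                .addPartitionKeyTable("user-42")
                .addRecords(Messages.Record.newBuilder()
                        .setPartitionKeyIndex(0)
                        .setData(ByteString.copyFromUtf8("payload"))
                        .build())
                .build();

        // Serialize, then parse back; parseFrom verifies required fields are set.
        byte[] bytes = aggregated.toByteArray();
        return Messages.AggregatedRecord.parseFrom(bytes);
    }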
public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Messages.AggregatedRecord) { - return mergeFrom((Messages.AggregatedRecord)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(Messages.AggregatedRecord other) { - if (other == Messages.AggregatedRecord.getDefaultInstance()) return this; - if (!other.partitionKeyTable_.isEmpty()) { - if (partitionKeyTable_.isEmpty()) { - partitionKeyTable_ = other.partitionKeyTable_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.addAll(other.partitionKeyTable_); - } - onChanged(); - } - if (!other.explicitHashKeyTable_.isEmpty()) { - if (explicitHashKeyTable_.isEmpty()) { - explicitHashKeyTable_ = other.explicitHashKeyTable_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.addAll(other.explicitHashKeyTable_); - } - onChanged(); - } - if (recordsBuilder_ == null) { - if (!other.records_.isEmpty()) { - if (records_.isEmpty()) { - records_ = other.records_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureRecordsIsMutable(); - records_.addAll(other.records_); - } - onChanged(); - } - } else { - if (!other.records_.isEmpty()) { - if (recordsBuilder_.isEmpty()) { - recordsBuilder_.dispose(); - recordsBuilder_ = null; - records_ = other.records_; - bitField0_ = (bitField0_ & ~0x00000004); - recordsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getRecordsFieldBuilder() : null; - } else { - recordsBuilder_.addAllMessages(other.records_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - for (int i = 0; i < getRecordsCount(); i++) { - if (!getRecords(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Messages.AggregatedRecord parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Messages.AggregatedRecord) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private com.google.protobuf.LazyStringList partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensurePartitionKeyTableIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = new com.google.protobuf.LazyStringArrayList(partitionKeyTable_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ProtocolStringList - getPartitionKeyTableList() { - return partitionKeyTable_.getUnmodifiableView(); - } - /** - * repeated string partition_key_table = 1; - */ - public int getPartitionKeyTableCount() { - return partitionKeyTable_.size(); - } - /** - * repeated string partition_key_table = 1; - */ - public java.lang.String getPartitionKeyTable(int index) { - return partitionKeyTable_.get(index); - } - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ByteString - getPartitionKeyTableBytes(int index) { - return partitionKeyTable_.getByteString(index); - } - /** - * repeated string partition_key_table = 1; - 
*/ - public Builder setPartitionKeyTable( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder addPartitionKeyTable( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.add(value); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder addAllPartitionKeyTable( - java.lang.Iterable values) { - ensurePartitionKeyTableIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, partitionKeyTable_); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder clearPartitionKeyTable() { - partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder addPartitionKeyTableBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.add(value); - onChanged(); - return this; - } - - private com.google.protobuf.LazyStringList explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureExplicitHashKeyTableIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = new com.google.protobuf.LazyStringArrayList(explicitHashKeyTable_); - bitField0_ |= 0x00000002; - } - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ProtocolStringList - getExplicitHashKeyTableList() { - return explicitHashKeyTable_.getUnmodifiableView(); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public int getExplicitHashKeyTableCount() { - return explicitHashKeyTable_.size(); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public java.lang.String getExplicitHashKeyTable(int index) { - return explicitHashKeyTable_.get(index); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ByteString - getExplicitHashKeyTableBytes(int index) { - return explicitHashKeyTable_.getByteString(index); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder setExplicitHashKeyTable( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder addExplicitHashKeyTable( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.add(value); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder addAllExplicitHashKeyTable( - java.lang.Iterable values) { - ensureExplicitHashKeyTableIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, explicitHashKeyTable_); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder clearExplicitHashKeyTable() { - explicitHashKeyTable_ = 
com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder addExplicitHashKeyTableBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.add(value); - onChanged(); - return this; - } - - private java.util.List records_ = - java.util.Collections.emptyList(); - private void ensureRecordsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - records_ = new java.util.ArrayList(records_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder> recordsBuilder_; - - /** - * repeated .Record records = 3; - */ - public java.util.List getRecordsList() { - if (recordsBuilder_ == null) { - return java.util.Collections.unmodifiableList(records_); - } else { - return recordsBuilder_.getMessageList(); - } - } - /** - * repeated .Record records = 3; - */ - public int getRecordsCount() { - if (recordsBuilder_ == null) { - return records_.size(); - } else { - return recordsBuilder_.getCount(); - } - } - /** - * repeated .Record records = 3; - */ - public Messages.Record getRecords(int index) { - if (recordsBuilder_ == null) { - return records_.get(index); - } else { - return recordsBuilder_.getMessage(index); - } - } - /** - * repeated .Record records = 3; - */ - public Builder setRecords( - int index, Messages.Record value) { - if (recordsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRecordsIsMutable(); - records_.set(index, value); - onChanged(); - } else { - recordsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder setRecords( - int index, Messages.Record.Builder builderForValue) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.set(index, builderForValue.build()); - onChanged(); - } else { - recordsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords(Messages.Record value) { - if (recordsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRecordsIsMutable(); - records_.add(value); - onChanged(); - } else { - recordsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords( - int index, Messages.Record value) { - if (recordsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRecordsIsMutable(); - records_.add(index, value); - onChanged(); - } else { - recordsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords( - Messages.Record.Builder builderForValue) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.add(builderForValue.build()); - onChanged(); - } else { - recordsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords( - int index, Messages.Record.Builder builderForValue) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.add(index, builderForValue.build()); - onChanged(); - } else { - recordsBuilder_.addMessage(index, builderForValue.build()); - } - 
return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addAllRecords( - java.lang.Iterable values) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, records_); - onChanged(); - } else { - recordsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder clearRecords() { - if (recordsBuilder_ == null) { - records_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - recordsBuilder_.clear(); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder removeRecords(int index) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.remove(index); - onChanged(); - } else { - recordsBuilder_.remove(index); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Messages.Record.Builder getRecordsBuilder( - int index) { - return getRecordsFieldBuilder().getBuilder(index); - } - /** - * repeated .Record records = 3; - */ - public Messages.RecordOrBuilder getRecordsOrBuilder( - int index) { - if (recordsBuilder_ == null) { - return records_.get(index); } else { - return recordsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .Record records = 3; - */ - public java.util.List - getRecordsOrBuilderList() { - if (recordsBuilder_ != null) { - return recordsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(records_); - } - } - /** - * repeated .Record records = 3; - */ - public Messages.Record.Builder addRecordsBuilder() { - return getRecordsFieldBuilder().addBuilder( - Messages.Record.getDefaultInstance()); - } - /** - * repeated .Record records = 3; - */ - public Messages.Record.Builder addRecordsBuilder( - int index) { - return getRecordsFieldBuilder().addBuilder( - index, Messages.Record.getDefaultInstance()); - } - /** - * repeated .Record records = 3; - */ - public java.util.List - getRecordsBuilderList() { - return getRecordsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder> - getRecordsFieldBuilder() { - if (recordsBuilder_ == null) { - recordsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder>( - records_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - records_ = null; - } - return recordsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:AggregatedRecord) - } - - static { - defaultInstance = new AggregatedRecord(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:AggregatedRecord) - } - - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_Tag_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Tag_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_Record_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Record_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_AggregatedRecord_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AggregatedRecord_fieldAccessorTable; - - public static 
com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\016messages.proto\"!\n\003Tag\022\013\n\003key\030\001 \002(\t\022\r\n\005" + - "value\030\002 \001(\t\"h\n\006Record\022\033\n\023partition_key_i" + - "ndex\030\001 \002(\004\022\037\n\027explicit_hash_key_index\030\002 " + - "\001(\004\022\014\n\004data\030\003 \002(\014\022\022\n\004tags\030\004 \003(\0132\004.Tag\"j\n" + - "\020AggregatedRecord\022\033\n\023partition_key_table" + - "\030\001 \003(\t\022\037\n\027explicit_hash_key_table\030\002 \003(\t\022" + - "\030\n\007records\030\003 \003(\0132\007.Record" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }, assigner); - internal_static_Tag_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_Tag_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Tag_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_Record_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_Record_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Record_descriptor, - new java.lang.String[] { "PartitionKeyIndex", "ExplicitHashKeyIndex", "Data", "Tags", }); - internal_static_AggregatedRecord_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_AggregatedRecord_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AggregatedRecord_descriptor, - new java.lang.String[] { "PartitionKeyTable", "ExplicitHashKeyTable", "Records", }); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java index c142d8be..1501bb19 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java @@ -29,7 +29,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; @@ -53,19 +52,32 @@ public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrie private final String shardId; final Supplier> completionServiceSupplier; - public AsynchronousGetRecordsRetrievalStrategy(@NonNull final KinesisDataFetcher dataFetcher, - final int retryGetRecordsInSeconds, final int maxGetRecordsThreadPool, String shardId) { + public AsynchronousGetRecordsRetrievalStrategy( + @NonNull final KinesisDataFetcher 
dataFetcher, + final int retryGetRecordsInSeconds, + final int maxGetRecordsThreadPool, + String shardId) { this(dataFetcher, buildExector(maxGetRecordsThreadPool, shardId), retryGetRecordsInSeconds, shardId); } - public AsynchronousGetRecordsRetrievalStrategy(final KinesisDataFetcher dataFetcher, - final ExecutorService executorService, final int retryGetRecordsInSeconds, String shardId) { - this(dataFetcher, executorService, retryGetRecordsInSeconds, () -> new ExecutorCompletionService<>(executorService), + public AsynchronousGetRecordsRetrievalStrategy( + final KinesisDataFetcher dataFetcher, + final ExecutorService executorService, + final int retryGetRecordsInSeconds, + String shardId) { + this( + dataFetcher, + executorService, + retryGetRecordsInSeconds, + () -> new ExecutorCompletionService<>(executorService), shardId); } - AsynchronousGetRecordsRetrievalStrategy(KinesisDataFetcher dataFetcher, ExecutorService executorService, - int retryGetRecordsInSeconds, Supplier> completionServiceSupplier, + AsynchronousGetRecordsRetrievalStrategy( + KinesisDataFetcher dataFetcher, + ExecutorService executorService, + int retryGetRecordsInSeconds, + Supplier> completionServiceSupplier, String shardId) { this.dataFetcher = dataFetcher; this.executorService = executorService; @@ -92,8 +104,8 @@ public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrie } try { - Future resultFuture = completionService.poll(retryGetRecordsInSeconds, - TimeUnit.SECONDS); + Future resultFuture = + completionService.poll(retryGetRecordsInSeconds, TimeUnit.SECONDS); if (resultFuture != null) { // // Fix to ensure that we only let the shard iterator advance when we intend to return the result @@ -135,9 +147,16 @@ public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrie private static ExecutorService buildExector(int maxGetRecordsThreadPool, String shardId) { String threadNameFormat = "get-records-worker-" + shardId + "-%d"; - return new ThreadPoolExecutor(CORE_THREAD_POOL_COUNT, maxGetRecordsThreadPool, TIME_TO_KEEP_ALIVE, - TimeUnit.SECONDS, new LinkedBlockingQueue<>(1), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadNameFormat).build(), + return new ThreadPoolExecutor( + CORE_THREAD_POOL_COUNT, + maxGetRecordsThreadPool, + TIME_TO_KEEP_ALIVE, + TimeUnit.SECONDS, + new LinkedBlockingQueue<>(1), + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat(threadNameFormat) + .build(), new ThreadPoolExecutor.AbortPolicy()); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java index 33be11d4..80d4ae61 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java @@ -20,10 +20,9 @@ import java.util.List; import java.util.stream.Collectors; import org.reactivestreams.Subscriber; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.common.RequestDetails; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import 
software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; @@ -44,14 +43,15 @@ public class BlockingRecordsPublisher implements RecordsPublisher { private Subscriber subscriber; private RequestDetails lastSuccessfulRequestDetails = new RequestDetails(); - public BlockingRecordsPublisher(final int maxRecordsPerCall, - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy) { + public BlockingRecordsPublisher( + final int maxRecordsPerCall, final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy) { this.maxRecordsPerCall = maxRecordsPerCall; this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy; } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, + public void start( + ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { // // Nothing to do here @@ -60,10 +60,12 @@ public class BlockingRecordsPublisher implements RecordsPublisher { public ProcessRecordsInput getNextResult() { GetRecordsResponse getRecordsResult = getRecordsRetrievalStrategy.getRecords(maxRecordsPerCall); - final RequestDetails getRecordsRequestDetails = new RequestDetails(getRecordsResult.responseMetadata().requestId(), Instant.now().toString()); + final RequestDetails getRecordsRequestDetails = new RequestDetails( + getRecordsResult.responseMetadata().requestId(), Instant.now().toString()); setLastSuccessfulRequestDetails(getRecordsRequestDetails); List records = getRecordsResult.records().stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + .map(KinesisClientRecord::fromRecord) + .collect(Collectors.toList()); return ProcessRecordsInput.builder() .records(records) .millisBehindLatest(getRecordsResult.millisBehindLatest()) diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java index ae1c6f30..ac71b4c7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java @@ -17,6 +17,7 @@ package software.amazon.kinesis.retrieval.polling; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; + import lombok.NonNull; import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; @@ -40,8 +41,7 @@ public interface DataFetcher { * @param initialCheckpoint Current checkpoint sequence number for this shard. * @param initialPositionInStream The initialPositionInStream. */ - void initialize(String initialCheckpoint, - InitialPositionInStreamExtended initialPositionInStream); + void initialize(String initialCheckpoint, InitialPositionInStreamExtended initialPositionInStream); /** * Initializes this KinesisDataFetcher's iterator based on the checkpointed sequence number as an @@ -50,8 +50,7 @@ public interface DataFetcher { * @param initialCheckpoint Current checkpoint sequence number for this shard. * @param initialPositionInStream The initialPositionInStream. 
*/ - void initialize(ExtendedSequenceNumber initialCheckpoint, - InitialPositionInStreamExtended initialPositionInStream); + void initialize(ExtendedSequenceNumber initialCheckpoint, InitialPositionInStreamExtended initialPositionInStream); /** * Advances this KinesisDataFetcher's internal iterator to be at the passed-in sequence number. @@ -59,8 +58,7 @@ public interface DataFetcher { * @param sequenceNumber advance the iterator to the record at this sequence number. * @param initialPositionInStream The initialPositionInStream. */ - void advanceIteratorTo(String sequenceNumber, - InitialPositionInStreamExtended initialPositionInStream); + void advanceIteratorTo(String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream); /** * Gets a new iterator from the last known sequence number i.e. the sequence number of the last record from the last @@ -75,7 +73,8 @@ public interface DataFetcher { * @param sequenceNumber reset the iterator to the record at this sequence number. * @param initialPositionInStream the current position in the stream to reset the iterator to. */ - void resetIterator(String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream); + void resetIterator( + String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream); /** * Retrieves the response based on the request. @@ -99,7 +98,8 @@ public interface DataFetcher { * @param request used to obtain the next shard iterator * @return next iterator string */ - String getNextIterator(GetShardIteratorRequest request) throws ExecutionException, InterruptedException, TimeoutException; + String getNextIterator(GetShardIteratorRequest request) + throws ExecutionException, InterruptedException, TimeoutException; /** * Gets the next set of records based on the iterator. 
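The DataFetcher contract above separates iterator bookkeeping (initialize, advanceIteratorTo, restartIterator, resetIterator) from the actual service calls. A minimal usage sketch against KinesisDataFetcher, the polling implementation whose diff follows; the wrapper method and the stream and shard names are illustrative, the deprecated convenience constructor is used for brevity, and it assumes DataFetcherResult.accept() as the step that commits the iterator advance:

    import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
    import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
    import software.amazon.kinesis.common.InitialPositionInStream;
    import software.amazon.kinesis.common.InitialPositionInStreamExtended;
    import software.amazon.kinesis.metrics.MetricsFactory;
    import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

    static void pollOnce(KinesisAsyncClient kinesisClient, MetricsFactory metricsFactory) {
        DataFetcher fetcher = new KinesisDataFetcher(
                kinesisClient, "my-stream", "shardId-000000000000", 1000, metricsFactory);

        // Seed the shard iterator from the checkpoint (LATEST for illustration).
        fetcher.initialize(
                ExtendedSequenceNumber.LATEST,
                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

        // accept() is what advances the iterator, so a batch that is never
        // accepted can be re-fetched from the same position.
        GetRecordsResponse response = fetcher.getRecords().accept();

        // After an iterator expires, restartIterator() re-seeds from the last
        // known sequence number rather than the original checkpoint.
        fetcher.restartIterator();
    }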
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java index d17828e9..85260e49 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.retrieval.polling; -import com.google.common.collect.Iterables; - import java.time.Duration; import java.util.Collections; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; + +import com.google.common.collect.Iterables; import lombok.AccessLevel; import lombok.Data; import lombok.Getter; @@ -73,25 +73,37 @@ public class KinesisDataFetcher implements DataFetcher { @NonNull private final KinesisAsyncClient kinesisClient; - @NonNull @Getter + + @NonNull + @Getter private final StreamIdentifier streamIdentifier; + @NonNull private final String shardId; + private final int maxRecords; + @NonNull private final MetricsFactory metricsFactory; + private final Duration maxFutureWait; private final String streamAndShardId; @Deprecated - public KinesisDataFetcher(KinesisAsyncClient kinesisClient, String streamName, String shardId, int maxRecords, MetricsFactory metricsFactory) { - this(kinesisClient, new KinesisDataFetcherProviderConfig( - StreamIdentifier.singleStreamInstance(streamName), - shardId, - metricsFactory, - maxRecords, - PollingConfig.DEFAULT_REQUEST_TIMEOUT - )); + public KinesisDataFetcher( + KinesisAsyncClient kinesisClient, + String streamName, + String shardId, + int maxRecords, + MetricsFactory metricsFactory) { + this( + kinesisClient, + new KinesisDataFetcherProviderConfig( + StreamIdentifier.singleStreamInstance(streamName), + shardId, + metricsFactory, + maxRecords, + PollingConfig.DEFAULT_REQUEST_TIMEOUT)); } /** @@ -106,7 +118,8 @@ public class KinesisDataFetcher implements DataFetcher { * @param kinesisClient * @param kinesisDataFetcherProviderConfig */ - public KinesisDataFetcher(KinesisAsyncClient kinesisClient, DataFetcherProviderConfig kinesisDataFetcherProviderConfig) { + public KinesisDataFetcher( + KinesisAsyncClient kinesisClient, DataFetcherProviderConfig kinesisDataFetcherProviderConfig) { this.kinesisClient = kinesisClient; this.maxFutureWait = kinesisDataFetcherProviderConfig.getKinesisRequestTimeout(); this.maxRecords = kinesisDataFetcherProviderConfig.getMaxRecords(); @@ -118,6 +131,7 @@ public class KinesisDataFetcher implements DataFetcher { @Getter private boolean isShardEndReached; + private boolean isInitialized; private String lastKnownSequenceNumber; private InitialPositionInStreamExtended initialPositionInStream; @@ -145,15 +159,17 @@ public class KinesisDataFetcher implements DataFetcher { } } + // CHECKSTYLE.OFF: MemberName final DataFetcherResult TERMINAL_RESULT = new DataFetcherResult() { + // CHECKSTYLE.ON: MemberName @Override public GetRecordsResponse getResult() { return GetRecordsResponse.builder() - .millisBehindLatest(null) - .records(Collections.emptyList()) - .nextShardIterator(null) - .childShards(Collections.emptyList()) - .build(); + .millisBehindLatest(null) + .records(Collections.emptyList()) + .nextShardIterator(null) + .childShards(Collections.emptyList()) + .build(); } @Override @@ -202,16 +218,17 @@ public class KinesisDataFetcher implements DataFetcher { * @param 
initialPositionInStream The initialPositionInStream. */ @Override - public void initialize(final String initialCheckpoint, - final InitialPositionInStreamExtended initialPositionInStream) { + public void initialize( + final String initialCheckpoint, final InitialPositionInStreamExtended initialPositionInStream) { log.info("Initializing shard {} with {}", streamAndShardId, initialCheckpoint); advanceIteratorTo(initialCheckpoint, initialPositionInStream); isInitialized = true; } @Override - public void initialize(final ExtendedSequenceNumber initialCheckpoint, - final InitialPositionInStreamExtended initialPositionInStream) { + public void initialize( + final ExtendedSequenceNumber initialCheckpoint, + final InitialPositionInStreamExtended initialPositionInStream) { log.info("Initializing shard {} with {}", streamAndShardId, initialCheckpoint.sequenceNumber()); advanceIteratorTo(initialCheckpoint.sequenceNumber(), initialPositionInStream); isInitialized = true; @@ -224,26 +241,33 @@ public class KinesisDataFetcher implements DataFetcher { * @param initialPositionInStream The initialPositionInStream. */ @Override - public void advanceIteratorTo(final String sequenceNumber, - final InitialPositionInStreamExtended initialPositionInStream) { + public void advanceIteratorTo( + final String sequenceNumber, final InitialPositionInStreamExtended initialPositionInStream) { advanceIteratorTo(sequenceNumber, initialPositionInStream, false); } - private void advanceIteratorTo(final String sequenceNumber, - final InitialPositionInStreamExtended initialPositionInStream, - boolean isIteratorRestart) { + private void advanceIteratorTo( + final String sequenceNumber, + final InitialPositionInStreamExtended initialPositionInStream, + boolean isIteratorRestart) { if (sequenceNumber == null) { throw new IllegalArgumentException("SequenceNumber should not be null: shardId " + shardId); } GetShardIteratorRequest.Builder builder = KinesisRequestsBuilder.getShardIteratorRequestBuilder() - .streamName(streamIdentifier.streamName()).shardId(shardId); + .streamName(streamIdentifier.streamName()) + .shardId(shardId); + streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString())); + GetShardIteratorRequest request; if (isIteratorRestart) { - request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStream).build(); + request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStream) + .build(); } else { - request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream).build(); + request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream) + .build(); } + log.debug("[GetShardIterator] Request has parameters {}", request); // TODO: Check if this metric is fine to be added final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION); @@ -268,8 +292,12 @@ public class KinesisDataFetcher implements DataFetcher { log.info("Caught ResourceNotFoundException when getting an iterator for shard {}", streamAndShardId, e); nextIterator = null; } finally { - MetricsUtil.addSuccessAndLatency(metricsScope, String.format("%s.%s", METRICS_PREFIX, "getShardIterator"), - success, startTime, MetricsLevel.DETAILED); + MetricsUtil.addSuccessAndLatency( + metricsScope, + String.format("%s.%s", METRICS_PREFIX, "getShardIterator"), + success, + startTime, + MetricsLevel.DETAILED); MetricsUtil.endScope(metricsScope); } @@ -290,40 +318,47 @@ public class KinesisDataFetcher implements 
DataFetcher { throw new IllegalStateException( "Make sure to initialize the KinesisDataFetcher before restarting the iterator."); } - log.debug("Restarting iterator for sequence number {} on shard id {}", - lastKnownSequenceNumber, streamAndShardId); + log.debug( + "Restarting iterator for sequence number {} on shard id {}", lastKnownSequenceNumber, streamAndShardId); advanceIteratorTo(lastKnownSequenceNumber, initialPositionInStream, true); } @Override - public void resetIterator(String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream) { + public void resetIterator( + String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream) { this.nextIterator = shardIterator; this.lastKnownSequenceNumber = sequenceNumber; this.initialPositionInStream = initialPositionInStream; } @Override - public GetRecordsResponse getGetRecordsResponse(GetRecordsRequest request) throws ExecutionException, InterruptedException, TimeoutException { - final GetRecordsResponse response = FutureUtils.resolveOrCancelFuture(kinesisClient.getRecords(request), - maxFutureWait); + public GetRecordsResponse getGetRecordsResponse(GetRecordsRequest request) + throws ExecutionException, InterruptedException, TimeoutException { + final GetRecordsResponse response = + FutureUtils.resolveOrCancelFuture(kinesisClient.getRecords(request), maxFutureWait); if (!isValidResult(response.nextShardIterator(), response.childShards())) { throw new RetryableRetrievalException("GetRecords response is not valid for shard: " + streamAndShardId + ". nextShardIterator: " + response.nextShardIterator() - + ". childShards: " + response.childShards() + ". Will retry GetRecords with the same nextIterator."); + + ". childShards: " + response.childShards() + + ". 
Will retry GetRecords with the same nextIterator."); } return response; } @Override - public GetRecordsRequest getGetRecordsRequest(String nextIterator) { - return KinesisRequestsBuilder.getRecordsRequestBuilder().shardIterator(nextIterator) - .limit(maxRecords).build(); + public GetRecordsRequest getGetRecordsRequest(String nextIterator) { + GetRecordsRequest.Builder builder = KinesisRequestsBuilder.getRecordsRequestBuilder() + .shardIterator(nextIterator) + .limit(maxRecords); + streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString())); + return builder.build(); } @Override - public String getNextIterator(GetShardIteratorRequest request) throws ExecutionException, InterruptedException, TimeoutException { - final GetShardIteratorResponse result = FutureUtils - .resolveOrCancelFuture(kinesisClient.getShardIterator(request), maxFutureWait); + public String getNextIterator(GetShardIteratorRequest request) + throws ExecutionException, InterruptedException, TimeoutException { + final GetShardIteratorResponse result = + FutureUtils.resolveOrCancelFuture(kinesisClient.getShardIterator(request), maxFutureWait); return result.shardIterator(); } @@ -334,7 +369,7 @@ public class KinesisDataFetcher implements DataFetcher { final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION); MetricsUtil.addStreamId(metricsScope, streamIdentifier); MetricsUtil.addShardId(metricsScope, shardId); - boolean success = false ; + boolean success = false; long startTime = System.currentTimeMillis(); try { final GetRecordsResponse response = getGetRecordsResponse(request); @@ -349,8 +384,12 @@ public class KinesisDataFetcher implements DataFetcher { } catch (TimeoutException e) { throw new RetryableRetrievalException(e.getMessage(), e); } finally { - MetricsUtil.addSuccessAndLatency(metricsScope, String.format("%s.%s", METRICS_PREFIX, "getRecords"), - success, startTime, MetricsLevel.DETAILED); + MetricsUtil.addSuccessAndLatency( + metricsScope, + String.format("%s.%s", METRICS_PREFIX, "getRecords"), + success, + startTime, + MetricsLevel.DETAILED); MetricsUtil.endScope(metricsScope); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java index a37e7121..1fe924d7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java @@ -18,6 +18,8 @@ package software.amazon.kinesis.retrieval.polling; import java.time.Duration; import java.util.Optional; import java.util.function.Function; + +import lombok.AccessLevel; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NonNull; @@ -40,6 +42,8 @@ public class PollingConfig implements RetrievalSpecificConfig { public static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(30); + public static final int DEFAULT_MAX_RECORDS = 10000; + /** * Configurable functional interface to override the existing DataFetcher. */ @@ -71,7 +75,7 @@ public class PollingConfig implements RetrievalSpecificConfig { * Default value: 10000 *

    */ - private int maxRecords = 10000; + private int maxRecords = DEFAULT_MAX_RECORDS; /** * @param streamName Name of Kinesis stream. @@ -86,11 +90,15 @@ public class PollingConfig implements RetrievalSpecificConfig { * The value for how long the ShardConsumer should sleep in between calls to * {@link KinesisAsyncClient#getRecords(GetRecordsRequest)}. * + * If this is not set using {@link PollingConfig#idleTimeBetweenReadsInMillis}, + * it defaults to 1500 ms. + * *

    - * Default value: 1000L + * Default value: 1500L *

    */ - private long idleTimeBetweenReadsInMillis = 1000L; + @Setter(AccessLevel.NONE) + private long idleTimeBetweenReadsInMillis = 1500L; /** * Time to wait in seconds before the worker retries to get a record. @@ -119,14 +127,32 @@ */ private RecordsFetcherFactory recordsFetcherFactory = new SimpleRecordsFetcherFactory(); + /** + * @deprecated Use {@link PollingConfig#idleTimeBetweenReadsInMillis} instead + */ + @Deprecated + public void setIdleTimeBetweenReadsInMillis(long idleTimeBetweenReadsInMillis) { + idleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis); + } + /** * Set the value for how long the ShardConsumer should sleep in between calls to * {@link KinesisAsyncClient#getRecords(GetRecordsRequest)}. If this is not specified here the value provided in * {@link RecordsFetcherFactory} will be used. */ - public void setIdleTimeBetweenReadsInMillis(long idleTimeBetweenReadsInMillis) { + public PollingConfig idleTimeBetweenReadsInMillis(long idleTimeBetweenReadsInMillis) { usePollingConfigIdleTimeValue = true; this.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis; + return this; + } + + public PollingConfig maxRecords(int maxRecords) { + if (maxRecords > DEFAULT_MAX_RECORDS) { + throw new IllegalArgumentException("maxRecords must be less than or equal to " + DEFAULT_MAX_RECORDS + + " but current value is " + maxRecords); + } + this.maxRecords = maxRecords; + return this; } /** @@ -137,10 +163,25 @@ public class PollingConfig implements RetrievalSpecificConfig { @Override public RetrievalFactory retrievalFactory() { // Prioritize the PollingConfig specified value if its updated. - if(usePollingConfigIdleTimeValue) { + if (usePollingConfigIdleTimeValue) { recordsFetcherFactory.idleMillisBetweenCalls(idleTimeBetweenReadsInMillis); } - return new SynchronousBlockingRetrievalFactory(streamName(), kinesisClient(), recordsFetcherFactory, - maxRecords(), kinesisRequestTimeout, dataFetcherProvider); + return new SynchronousBlockingRetrievalFactory( + streamName(), + kinesisClient(), + recordsFetcherFactory, + maxRecords(), + kinesisRequestTimeout, + dataFetcherProvider); + } + + @Override + public void validateState(final boolean isMultiStream) { + if (isMultiStream) { + if (streamName() != null) { + throw new IllegalArgumentException( + "PollingConfig must not have streamName configured in multi-stream mode"); + } + } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java index ab406244..02e2f7f5 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java @@ -15,8 +15,6 @@ package software.amazon.kinesis.retrieval.polling; -import com.google.common.annotations.VisibleForTesting; - import java.time.Duration; import java.time.Instant; import java.util.List; @@ -27,6 +25,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; + +import com.google.common.annotations.VisibleForTesting; import lombok.AccessLevel; import lombok.Data; import lombok.Getter; @@ -42,6 +42,7 @@ import software.amazon.awssdk.core.exception.SdkException; import
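Taken together, the PollingConfig hunk above replaces the plain setter with a fluent API: the deprecated setIdleTimeBetweenReadsInMillis now delegates to idleTimeBetweenReadsInMillis(long), which also marks the value as an override of the RecordsFetcherFactory default, and maxRecords(int) rejects values above DEFAULT_MAX_RECORDS. A minimal configuration sketch before the PrefetchRecordsPublisher diff resumes; the stream name and client variable are illustrative, and the single-stream constructor documented earlier is assumed:

    PollingConfig pollingConfig = new PollingConfig("my-stream", kinesisClient)
            .idleTimeBetweenReadsInMillis(2000L) // overrides the new 1500 ms default
            .maxRecords(5000);                   // anything above 10000 now throws

    // Multi-stream mode rejects a per-stream name: validateState(true) throws
    // IllegalArgumentException when streamName() is non-null, as it is here.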
software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.InvalidArgumentException; import software.amazon.awssdk.services.kinesis.model.ProvisionedThroughputExceededException; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.InitialPositionInStreamExtended; @@ -61,6 +62,7 @@ import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static software.amazon.kinesis.common.DiagnosticUtils.takeDelayedDeliveryActionIfRequired; /** @@ -98,11 +100,13 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { private final String streamAndShardId; private final long awaitTerminationTimeoutMillis; private Subscriber subscriber; - @VisibleForTesting @Getter + + @VisibleForTesting + @Getter private final PublisherSession publisherSession; + private final ReentrantReadWriteLock resetLock = new ReentrantReadWriteLock(); private boolean wasReset = false; - private Instant lastEventDeliveryTime = Instant.EPOCH; private final RequestDetails lastSuccessfulRequestDetails = new RequestDetails(); @@ -110,15 +114,19 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { @Accessors(fluent = true) static final class PublisherSession { private final AtomicLong requestedResponses = new AtomicLong(0); - @VisibleForTesting @Getter + + @VisibleForTesting + @Getter private final LinkedBlockingQueue prefetchRecordsQueue; + private final PrefetchCounters prefetchCounters; private final DataFetcher dataFetcher; private InitialPositionInStreamExtended initialPositionInStreamExtended; private String highestSequenceNumber; // Initialize the session on publisher start. - void init(ExtendedSequenceNumber extendedSequenceNumber, + void init( + ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { this.initialPositionInStreamExtended = initialPositionInStreamExtended; this.highestSequenceNumber = extendedSequenceNumber.sequenceNumber(); @@ -134,16 +142,18 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { prefetchRecordsQueue.clear(); prefetchCounters.reset(); highestSequenceNumber = prefetchRecordsRetrieved.lastBatchSequenceNumber(); - dataFetcher.resetIterator(prefetchRecordsRetrieved.shardIterator(), highestSequenceNumber, - initialPositionInStreamExtended); + dataFetcher.resetIterator( + prefetchRecordsRetrieved.shardIterator(), highestSequenceNumber, initialPositionInStreamExtended); } // Handle records delivery ack and execute nextEventDispatchAction. // This method is not thread-safe and needs to be called after acquiring a monitor. - void handleRecordsDeliveryAck(RecordsDeliveryAck recordsDeliveryAck, String streamAndShardId, Runnable nextEventDispatchAction) { + void handleRecordsDeliveryAck( + RecordsDeliveryAck recordsDeliveryAck, String streamAndShardId, Runnable nextEventDispatchAction) { final PrefetchRecordsRetrieved recordsToCheck = peekNextRecord(); // Verify if the ack matches the head of the queue and evict it. 
- if (recordsToCheck != null && recordsToCheck.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) { + if (recordsToCheck != null + && recordsToCheck.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) { evictPublishedRecordAndUpdateDemand(streamAndShardId); nextEventDispatchAction.run(); } else { @@ -152,8 +162,12 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { // to happen. final BatchUniqueIdentifier peekedBatchUniqueIdentifier = recordsToCheck == null ? null : recordsToCheck.batchUniqueIdentifier(); - log.info("{} : Received a stale notification with id {} instead of expected id {} at {}. Will ignore.", - streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier(), peekedBatchUniqueIdentifier, Instant.now()); + log.info( + "{} : Received a stale notification with id {} instead of expected id {} at {}. Will ignore.", + streamAndShardId, + recordsDeliveryAck.batchUniqueIdentifier(), + peekedBatchUniqueIdentifier, + Instant.now()); } } @@ -167,7 +181,8 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } else { log.info( "{}: No record batch found while evicting from the prefetch queue. This indicates the prefetch buffer" - + " was reset.", streamAndShardId); + + " was reset.", + streamAndShardId); } return result; } @@ -180,7 +195,8 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { return prefetchRecordsQueue.peek(); } - boolean offerRecords(PrefetchRecordsRetrieved recordsRetrieved, long idleMillisBetweenCalls) throws InterruptedException { + boolean offerRecords(PrefetchRecordsRetrieved recordsRetrieved, long idleMillisBetweenCalls) + throws InterruptedException { return prefetchRecordsQueue.offer(recordsRetrieved, idleMillisBetweenCalls, TimeUnit.MILLISECONDS); } @@ -188,15 +204,14 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { prefetchCounters.removed(result.processRecordsInput); requestedResponses.decrementAndGet(); } - } /** * Constructor for the PrefetchRecordsPublisher. This cache prefetches records from Kinesis and stores them in a * LinkedBlockingQueue. 
- * + * * @see PrefetchRecordsPublisher - * + * * @param maxPendingProcessRecordsInput Max number of ProcessRecordsInput that can be held in the cache before * blocking * @param maxByteSize Max byte size of the queue before blocking next get records call @@ -207,22 +222,27 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { * @param idleMillisBetweenCalls maximum time to wait before dispatching the next get records call * @param awaitTerminationTimeoutMillis maximum time to wait for graceful shutdown of executorService */ - public PrefetchRecordsPublisher(final int maxPendingProcessRecordsInput, final int maxByteSize, final int maxRecordsCount, - final int maxRecordsPerCall, - @NonNull final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, - @NonNull final ExecutorService executorService, - final long idleMillisBetweenCalls, - @NonNull final MetricsFactory metricsFactory, - @NonNull final String operation, - @NonNull final String shardId, - final long awaitTerminationTimeoutMillis) { + public PrefetchRecordsPublisher( + final int maxPendingProcessRecordsInput, + final int maxByteSize, + final int maxRecordsCount, + final int maxRecordsPerCall, + @NonNull final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + @NonNull final ExecutorService executorService, + final long idleMillisBetweenCalls, + @NonNull final MetricsFactory metricsFactory, + @NonNull final String operation, + @NonNull final String shardId, + final long awaitTerminationTimeoutMillis) { this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy; this.maxRecordsPerCall = maxRecordsPerCall; this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput; this.maxByteSize = maxByteSize; this.maxRecordsCount = maxRecordsCount; - this.publisherSession = new PublisherSession(new LinkedBlockingQueue<>(this.maxPendingProcessRecordsInput), - new PrefetchCounters(), this.getRecordsRetrievalStrategy.dataFetcher()); + this.publisherSession = new PublisherSession( + new LinkedBlockingQueue<>(this.maxPendingProcessRecordsInput), + new PrefetchCounters(), + this.getRecordsRetrievalStrategy.dataFetcher()); this.executorService = executorService; this.metricsFactory = new ThreadSafeMetricsDelegatingFactory(metricsFactory); this.idleMillisBetweenCalls = idleMillisBetweenCalls; @@ -249,22 +269,35 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { * @param executorService Executor service for the cache * @param idleMillisBetweenCalls maximum time to wait before dispatching the next get records call */ - public PrefetchRecordsPublisher(final int maxPendingProcessRecordsInput, final int maxByteSize, final int maxRecordsCount, - final int maxRecordsPerCall, - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, - final ExecutorService executorService, - final long idleMillisBetweenCalls, - final MetricsFactory metricsFactory, - final String operation, - final String shardId) { - this(maxPendingProcessRecordsInput, maxByteSize, maxRecordsCount, maxRecordsPerCall, - getRecordsRetrievalStrategy, executorService, idleMillisBetweenCalls, - metricsFactory, operation, shardId, - DEFAULT_AWAIT_TERMINATION_TIMEOUT_MILLIS); + public PrefetchRecordsPublisher( + final int maxPendingProcessRecordsInput, + final int maxByteSize, + final int maxRecordsCount, + final int maxRecordsPerCall, + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + final ExecutorService executorService, + final long idleMillisBetweenCalls, + final MetricsFactory metricsFactory, + final String 
operation, + final String shardId) { + this( + maxPendingProcessRecordsInput, + maxByteSize, + maxRecordsCount, + maxRecordsPerCall, + getRecordsRetrievalStrategy, + executorService, + idleMillisBetweenCalls, + metricsFactory, + operation, + shardId, + DEFAULT_AWAIT_TERMINATION_TIMEOUT_MILLIS); } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { + public void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended) { if (executorService.isShutdown()) { throw new IllegalStateException("ExecutorService has been shutdown."); } @@ -289,7 +322,6 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } private PrefetchRecordsRetrieved peekNextResult() { - throwOnIllegalState(); return publisherSession.peekNextRecord(); } @@ -327,7 +359,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } resetLock.writeLock().lock(); try { - publisherSession.reset((PrefetchRecordsRetrieved)recordsRetrieved); + publisherSession.reset((PrefetchRecordsRetrieved) recordsRetrieved); wasReset = true; } finally { resetLock.writeLock().unlock(); @@ -336,6 +368,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { @Override public void subscribe(Subscriber s) { + throwOnIllegalState(); subscriber = s; subscriber.onSubscribe(new Subscription() { @Override @@ -389,6 +422,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { // If there is an event available to drain and if there is at least one demand, // then schedule it for delivery if (publisherSession.hasDemandToPublish() && canDispatchRecord(recordsToDeliver)) { + throwOnIllegalState(); subscriber.onNext(recordsToDeliver.prepareForPublish()); recordsToDeliver.dispatched(); lastEventDeliveryTime = Instant.now(); @@ -409,11 +443,17 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { final String lastBatchSequenceNumber; final String shardIterator; final BatchUniqueIdentifier batchUniqueIdentifier; - @Accessors(fluent = false) @Setter(AccessLevel.NONE) boolean dispatched = false; + + @Accessors(fluent = false) + @Setter(AccessLevel.NONE) + boolean dispatched = false; PrefetchRecordsRetrieved prepareForPublish() { - return new PrefetchRecordsRetrieved(processRecordsInput.toBuilder().cacheExitTime(Instant.now()).build(), - lastBatchSequenceNumber, shardIterator, batchUniqueIdentifier); + return new PrefetchRecordsRetrieved( + processRecordsInput.toBuilder().cacheExitTime(Instant.now()).build(), + lastBatchSequenceNumber, + shardIterator, + batchUniqueIdentifier); } @Override @@ -422,30 +462,32 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } // Indicates if this record batch was already dispatched for delivery. - void dispatched() { dispatched = true; } + void dispatched() { + dispatched = true; + } /** * Generate batch unique identifier for PrefetchRecordsRetrieved, where flow will be empty. 
* @return BatchUniqueIdentifier */ public static BatchUniqueIdentifier generateBatchUniqueIdentifier() { - return new BatchUniqueIdentifier(UUID.randomUUID().toString(), - StringUtils.EMPTY); + return new BatchUniqueIdentifier(UUID.randomUUID().toString(), StringUtils.EMPTY); } - } private String calculateHighestSequenceNumber(ProcessRecordsInput processRecordsInput) { String result = publisherSession.highestSequenceNumber(); - if (processRecordsInput.records() != null && !processRecordsInput.records().isEmpty()) { - result = processRecordsInput.records().get(processRecordsInput.records().size() - 1).sequenceNumber(); + if (processRecordsInput.records() != null + && !processRecordsInput.records().isEmpty()) { + result = processRecordsInput + .records() + .get(processRecordsInput.records().size() - 1) + .sequenceNumber(); } return result; } - private static class PositionResetException extends RuntimeException { - - } + private static class PositionResetException extends RuntimeException {} private class DefaultGetRecordsCacheDaemon implements Runnable { volatile boolean isShutdown = false; @@ -461,16 +503,19 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { try { resetLock.readLock().lock(); makeRetrievalAttempt(); - } catch(PositionResetException pre) { + } catch (PositionResetException pre) { log.debug("{} : Position was reset while attempting to add item to queue.", streamAndShardId); } catch (Throwable e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); } - log.error("{} : Unexpected exception was thrown. This could probably be an issue or a bug." + - " Please search for the exception/error online to check what is going on. If the " + - "issue persists or is a recurring problem, feel free to open an issue on, " + - "https://github.com/awslabs/amazon-kinesis-client.", streamAndShardId, e); + log.error( + "{} : Unexpected exception was thrown. This could probably be an issue or a bug." + + " Please search for the exception/error online to check what is going on. 
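The daemon loop above holds resetLock.readLock() for the duration of each retrieval attempt, while the reset path (earlier in this file) takes the write lock before clearing state; that asymmetry is what makes PositionResetException safe to treat as a benign retry. A compact sketch of the protocol, with the queue work abstracted behind runnables (illustrative only, deliberately simplified):

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class ResetCoordination {
    private final ReentrantReadWriteLock resetLock = new ReentrantReadWriteLock();

    // Producer side: retrieval attempts run under the read lock, so they
    // proceed freely as long as no reset is in progress.
    void retrievalAttempt(Runnable enqueue) {
        resetLock.readLock().lock();
        try {
            enqueue.run(); // may bail out mid-flight (PositionResetException in the real code)
        } finally {
            resetLock.readLock().unlock();
        }
    }

    // Reset side: the write lock excludes all retrieval attempts while the
    // queue is cleared and the fetcher is re-seeked to the restart position.
    void reset(Runnable clearQueueAndSeek) {
        resetLock.writeLock().lock();
        try {
            clearQueueAndSeek.run();
        } finally {
            resetLock.writeLock().unlock();
        }
    }
}
```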
If the " + + "issue persists or is a recurring problem, feel free to open an issue on, " + + "https://github.com/awslabs/amazon-kinesis-client.", + streamAndShardId, + e); } finally { resetLock.readLock().unlock(); } @@ -487,33 +532,52 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { lastSuccessfulCall = Instant.now(); final List records = getRecordsResult.records().stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + .map(KinesisClientRecord::fromRecord) + .collect(Collectors.toList()); ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder() .records(records) .millisBehindLatest(getRecordsResult.millisBehindLatest()) .cacheEntryTime(lastSuccessfulCall) - .isAtShardEnd(getRecordsRetrievalStrategy.dataFetcher().isShardEndReached()) + .isAtShardEnd( + getRecordsRetrievalStrategy.dataFetcher().isShardEndReached()) .childShards(getRecordsResult.childShards()) .build(); - PrefetchRecordsRetrieved recordsRetrieved = new PrefetchRecordsRetrieved(processRecordsInput, - calculateHighestSequenceNumber(processRecordsInput), getRecordsResult.nextShardIterator(), + PrefetchRecordsRetrieved recordsRetrieved = new PrefetchRecordsRetrieved( + processRecordsInput, + calculateHighestSequenceNumber(processRecordsInput), + getRecordsResult.nextShardIterator(), PrefetchRecordsRetrieved.generateBatchUniqueIdentifier()); publisherSession.highestSequenceNumber(recordsRetrieved.lastBatchSequenceNumber); - log.debug("Last sequence number retrieved for streamAndShardId {} is {}", streamAndShardId, + log.debug( + "Last sequence number retrieved for streamAndShardId {} is {}", + streamAndShardId, recordsRetrieved.lastBatchSequenceNumber); addArrivedRecordsInput(recordsRetrieved); drainQueueForRequests(); } catch (PositionResetException pse) { throw pse; } catch (RetryableRetrievalException rre) { - log.info("{} : Timeout occurred while waiting for response from Kinesis. Will retry the request.", streamAndShardId); + log.info( + "{} : Timeout occurred while waiting for response from Kinesis. Will retry the request.", + streamAndShardId); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - log.info("{} : Thread was interrupted, indicating shutdown was called on the cache.", streamAndShardId); + log.info( + "{} : Thread was interrupted, indicating shutdown was called on the cache.", + streamAndShardId); + } catch (InvalidArgumentException e) { + log.info( + "{} : records threw InvalidArgumentException - iterator will be refreshed before retrying", + streamAndShardId, + e); + publisherSession.dataFetcher().restartIterator(); } catch (ExpiredIteratorException e) { - log.info("{} : records threw ExpiredIteratorException - restarting" - + " after greatest seqNum passed to customer", streamAndShardId, e); + log.info( + "{} : records threw ExpiredIteratorException - restarting" + + " after greatest seqNum passed to customer", + streamAndShardId, + e); MetricsUtil.addStreamId(scope, streamId); scope.addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.COUNT, MetricsLevel.SUMMARY); @@ -537,8 +601,10 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { publisherSession.prefetchCounters().waitForConsumer(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); - log.info("{} : Thread was interrupted while waiting for the consumer. " + - "Shutdown has probably been started", streamAndShardId); + log.info( + "{} : Thread was interrupted while waiting for the consumer. 
" + + "Shutdown has probably been started", + streamAndShardId); } } } @@ -555,14 +621,18 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { return; } // Add a sleep if lastSuccessfulCall is still null but this is not the first try to avoid retry storm - if(lastSuccessfulCall == null) { + if (lastSuccessfulCall == null) { Thread.sleep(idleMillisBetweenCalls); return; } - long timeSinceLastCall = Duration.between(lastSuccessfulCall, Instant.now()).abs().toMillis(); + long timeSinceLastCall = + Duration.between(lastSuccessfulCall, Instant.now()).abs().toMillis(); if (timeSinceLastCall < idleMillisBetweenCalls) { Thread.sleep(idleMillisBetweenCalls - timeSinceLastCall); } + + // avoid immediate-retry storms + lastSuccessfulCall = null; } } @@ -586,12 +656,15 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } private long getByteSize(final ProcessRecordsInput result) { - return result.records().stream().mapToLong(record -> record.data().limit()).sum(); + return result.records().stream() + .mapToLong(record -> record.data().limit()) + .sum(); } public synchronized void waitForConsumer() throws InterruptedException { if (!shouldGetNewRecords()) { - log.debug("{} : Queue is full waiting for consumer for {} ms", streamAndShardId, idleMillisBetweenCalls); + log.debug( + "{} : Queue is full waiting for consumer for {} ms", streamAndShardId, idleMillisBetweenCalls); this.wait(idleMillisBetweenCalls); } } @@ -610,9 +683,9 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { @Override public String toString() { - return String.format("{ Requests: %d, Records: %d, Bytes: %d }", publisherSession.prefetchRecordsQueue().size(), size, - byteSize); + return String.format( + "{ Requests: %d, Records: %d, Bytes: %d }", + publisherSession.prefetchRecordsQueue().size(), size, byteSize); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java index a74e3f31..2f1dea62 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java @@ -17,7 +17,6 @@ package software.amazon.kinesis.retrieval.polling; import java.util.concurrent.Executors; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.metrics.MetricsFactory; @@ -36,26 +35,37 @@ public class SimpleRecordsFetcherFactory implements RecordsFetcherFactory { private DataFetchingStrategy dataFetchingStrategy = DataFetchingStrategy.DEFAULT; @Override - public RecordsPublisher createRecordsFetcher(GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, String shardId, - MetricsFactory metricsFactory, int maxRecords) { + public RecordsPublisher createRecordsFetcher( + GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + String shardId, + MetricsFactory metricsFactory, + int maxRecords) { - return new PrefetchRecordsPublisher(maxPendingProcessRecordsInput, maxByteSize, maxRecordsCount, maxRecords, + return new PrefetchRecordsPublisher( + maxPendingProcessRecordsInput, + maxByteSize, + maxRecordsCount, + maxRecords, getRecordsRetrievalStrategy, - Executors - .newFixedThreadPool(1, - new 
ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("prefetch-cache-" + shardId + "-%04d").build()), - idleMillisBetweenCalls, metricsFactory, "ProcessTask", shardId); - + Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("prefetch-cache-" + shardId + "-%04d") + .build()), + idleMillisBetweenCalls, + metricsFactory, + "ProcessTask", + shardId); } @Override - public void maxPendingProcessRecordsInput(int maxPendingProcessRecordsInput){ + public void maxPendingProcessRecordsInput(int maxPendingProcessRecordsInput) { this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput; } @Override - public void maxByteSize(int maxByteSize){ + public void maxByteSize(int maxByteSize) { this.maxByteSize = maxByteSize; } @@ -65,7 +75,7 @@ public class SimpleRecordsFetcherFactory implements RecordsFetcherFactory { } @Override - public void dataFetchingStrategy(DataFetchingStrategy dataFetchingStrategy){ + public void dataFetchingStrategy(DataFetchingStrategy dataFetchingStrategy) { this.dataFetchingStrategy = dataFetchingStrategy; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java index 071763fc..509e261f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java @@ -17,10 +17,12 @@ package software.amazon.kinesis.retrieval.polling; import java.time.Duration; import java.util.function.Function; + import lombok.Data; import lombok.NonNull; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.annotations.KinesisClientInternalApi; +import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.metrics.MetricsFactory; @@ -40,8 +42,10 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { @NonNull private final String streamName; + @NonNull private final KinesisAsyncClient kinesisClient; + @NonNull private final RecordsFetcherFactory recordsFetcherFactory; @@ -50,41 +54,20 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { private final Function dataFetcherProvider; - @Deprecated - public SynchronousBlockingRetrievalFactory(String streamName, - KinesisAsyncClient kinesisClient, - RecordsFetcherFactory recordsFetcherFactory, - int maxRecords, - Duration kinesisRequestTimeout) { - this(streamName, - kinesisClient, - recordsFetcherFactory, - maxRecords, - kinesisRequestTimeout, - defaultDataFetcherProvider(kinesisClient)); - } - - public SynchronousBlockingRetrievalFactory(String streamName, - KinesisAsyncClient kinesisClient, - RecordsFetcherFactory recordsFetcherFactory, - int maxRecords, - Duration kinesisRequestTimeout, - Function dataFetcherProvider) { + public SynchronousBlockingRetrievalFactory( + String streamName, + KinesisAsyncClient kinesisClient, + RecordsFetcherFactory recordsFetcherFactory, + int maxRecords, + Duration kinesisRequestTimeout, + Function dataFetcherProvider) { this.streamName = streamName; this.kinesisClient = kinesisClient; this.recordsFetcherFactory = recordsFetcherFactory; this.maxRecords = maxRecords; 
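The surviving SynchronousBlockingRetrievalFactory constructor keeps the dataFetcherProvider seam: callers may inject a Function that builds the DataFetcher, and passing null falls back to the Kinesis default (the defaulting ternary a few lines below). A type-simplified sketch of that pattern, with String standing in for DataFetcherProviderConfig and DataFetcher:

```java
import java.util.function.Function;

final class ProviderDefaulting {
    private final Function<String, String> dataFetcherProvider;

    // null selects the built-in provider; tests can inject a fake fetcher.
    ProviderDefaulting(Function<String, String> provider) {
        this.dataFetcherProvider = provider == null
                ? config -> "default-fetcher:" + config
                : provider;
    }

    String fetcherFor(String config) {
        return dataFetcherProvider.apply(config);
    }
}
```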
this.kinesisRequestTimeout = kinesisRequestTimeout; - this.dataFetcherProvider = dataFetcherProvider == null ? - defaultDataFetcherProvider(kinesisClient) : dataFetcherProvider; - } - - @Deprecated - public SynchronousBlockingRetrievalFactory(String streamName, - KinesisAsyncClient kinesisClient, - RecordsFetcherFactory recordsFetcherFactory, - int maxRecords) { - this(streamName, kinesisClient, recordsFetcherFactory, maxRecords, PollingConfig.DEFAULT_REQUEST_TIMEOUT); + this.dataFetcherProvider = + dataFetcherProvider == null ? defaultDataFetcherProvider(kinesisClient) : dataFetcherProvider; } private static Function defaultDataFetcherProvider( @@ -92,19 +75,12 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { return dataFetcherProviderConfig -> new KinesisDataFetcher(kinesisClient, dataFetcherProviderConfig); } - @Override - public GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(@NonNull final ShardInfo shardInfo, - @NonNull final MetricsFactory metricsFactory) { - final StreamIdentifier streamIdentifier = shardInfo.streamIdentifierSerOpt().isPresent() ? - StreamIdentifier.multiStreamInstance(shardInfo.streamIdentifierSerOpt().get()) : - StreamIdentifier.singleStreamInstance(streamName); - + private GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy( + @NonNull final ShardInfo shardInfo, + @NonNull final StreamIdentifier streamIdentifier, + @NonNull final MetricsFactory metricsFactory) { final DataFetcherProviderConfig kinesisDataFetcherProviderConfig = new KinesisDataFetcherProviderConfig( - streamIdentifier, - shardInfo.shardId(), - metricsFactory, - maxRecords, - kinesisRequestTimeout); + streamIdentifier, shardInfo.shardId(), metricsFactory, maxRecords, kinesisRequestTimeout); final DataFetcher dataFetcher = this.dataFetcherProvider.apply(kinesisDataFetcherProviderConfig); @@ -112,9 +88,14 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { } @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, + public RecordsPublisher createGetRecordsCache( + @NonNull final ShardInfo shardInfo, + @NonNull final StreamConfig streamConfig, @NonNull final MetricsFactory metricsFactory) { - return recordsFetcherFactory.createRecordsFetcher(createGetRecordsRetrievalStrategy(shardInfo, metricsFactory), - shardInfo.shardId(), metricsFactory, maxRecords); + return recordsFetcherFactory.createRecordsFetcher( + createGetRecordsRetrievalStrategy(shardInfo, streamConfig.streamIdentifier(), metricsFactory), + shardInfo.shardId(), + metricsFactory, + maxRecords); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousPrefetchingRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousPrefetchingRetrievalFactory.java deleted file mode 100644 index efa11e70..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousPrefetchingRetrievalFactory.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2019 Amazon.com, Inc. or its affiliates. - * Licensed under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package software.amazon.kinesis.retrieval.polling; - -import java.time.Duration; -import java.util.concurrent.ExecutorService; -import lombok.NonNull; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.StreamIdentifier; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.KinesisDataFetcherProviderConfig; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetrievalFactory; - -/** - * - */ -@KinesisClientInternalApi -public class SynchronousPrefetchingRetrievalFactory implements RetrievalFactory { - @NonNull - private final String streamName; - @NonNull - private final KinesisAsyncClient kinesisClient; - @NonNull - private final RecordsFetcherFactory recordsFetcherFactory; - private final int maxRecords; - @NonNull - private final ExecutorService executorService; - private final long idleMillisBetweenCalls; - private final Duration maxFutureWait; - - @Deprecated - public SynchronousPrefetchingRetrievalFactory(String streamName, KinesisAsyncClient kinesisClient, - RecordsFetcherFactory recordsFetcherFactory, int maxRecords, ExecutorService executorService, - long idleMillisBetweenCalls) { - this(streamName, kinesisClient, recordsFetcherFactory, maxRecords, executorService, idleMillisBetweenCalls, - PollingConfig.DEFAULT_REQUEST_TIMEOUT); - } - - public SynchronousPrefetchingRetrievalFactory(String streamName, KinesisAsyncClient kinesisClient, - RecordsFetcherFactory recordsFetcherFactory, int maxRecords, ExecutorService executorService, - long idleMillisBetweenCalls, Duration maxFutureWait) { - this.streamName = streamName; - this.kinesisClient = kinesisClient; - this.recordsFetcherFactory = recordsFetcherFactory; - this.maxRecords = maxRecords; - this.executorService = executorService; - this.idleMillisBetweenCalls = idleMillisBetweenCalls; - this.maxFutureWait = maxFutureWait; - } - - @Override public GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(@NonNull final ShardInfo shardInfo, - @NonNull final MetricsFactory metricsFactory) { - final StreamIdentifier streamIdentifier = shardInfo.streamIdentifierSerOpt().isPresent() ? 
- StreamIdentifier.multiStreamInstance(shardInfo.streamIdentifierSerOpt().get()) : - StreamIdentifier.singleStreamInstance(streamName); - - return new SynchronousGetRecordsRetrievalStrategy( - new KinesisDataFetcher(kinesisClient, new KinesisDataFetcherProviderConfig( - streamIdentifier, - shardInfo.shardId(), - metricsFactory, - maxRecords, - maxFutureWait - ))); - } - - @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, - @NonNull final MetricsFactory metricsFactory) { - return new PrefetchRecordsPublisher(recordsFetcherFactory.maxPendingProcessRecordsInput(), - recordsFetcherFactory.maxByteSize(), recordsFetcherFactory.maxRecordsCount(), maxRecords, - createGetRecordsRetrievalStrategy(shardInfo, metricsFactory), executorService, idleMillisBetweenCalls, - metricsFactory, "Prefetching", shardInfo.shardId()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java index 76415a85..56742a5e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java @@ -1,15 +1,15 @@ package software.amazon.kinesis.schemaregistry; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + import com.amazonaws.services.schemaregistry.common.Schema; import com.amazonaws.services.schemaregistry.deserializers.GlueSchemaRegistryDeserializer; import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.common.KinesisClientLibraryPackage; import software.amazon.kinesis.retrieval.KinesisClientRecord; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - /** * Identifies and decodes Glue Schema Registry data from incoming KinesisClientRecords. */ @@ -18,8 +18,7 @@ public class SchemaRegistryDecoder { private static final String USER_AGENT_APP_NAME = "kcl" + "-" + KinesisClientLibraryPackage.VERSION; private final GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer; - public SchemaRegistryDecoder( - GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer) { + public SchemaRegistryDecoder(GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer) { this.glueSchemaRegistryDeserializer = glueSchemaRegistryDeserializer; this.glueSchemaRegistryDeserializer.overrideUserAgentApp(USER_AGENT_APP_NAME); } @@ -29,8 +28,7 @@ public class SchemaRegistryDecoder { * @param records List * @return List */ - public List decode( - final List records) { + public List decode(final List records) { final List decodedRecords = new ArrayList<>(); for (final KinesisClientRecord record : records) { @@ -58,15 +56,10 @@ public class SchemaRegistryDecoder { final Schema schema = glueSchemaRegistryDeserializer.getSchema(data); final ByteBuffer recordData = ByteBuffer.wrap(glueSchemaRegistryDeserializer.getData(data)); - return - record.toBuilder() - .schema(schema) - .data(recordData) - .build(); + return record.toBuilder().schema(schema).data(recordData).build(); } catch (Exception e) { - log.warn("Unable to decode Glue Schema Registry information from record {}: ", - record.sequenceNumber(), e); - //We ignore Glue Schema Registry failures and return the record. 
+ log.warn("Unable to decode Glue Schema Registry information from record {}: ", record.sequenceNumber(), e); + // We ignore Glue Schema Registry failures and return the record. return record; } } diff --git a/amazon-kinesis-client/src/main/proto/messages.proto b/amazon-kinesis-client/src/main/proto/messages.proto new file mode 100644 index 00000000..eebb32b1 --- /dev/null +++ b/amazon-kinesis-client/src/main/proto/messages.proto @@ -0,0 +1,23 @@ +// Copied from amazon-kinesis-producer/aws/kinesis/protobuf/messages.proto with +// subset of messages that KCL needs + +syntax = "proto2"; +package software.amazon.kinesis.retrieval.kpl; + +message Tag { + required string key = 1; + optional string value = 2; +} + +message Record { + required uint64 partition_key_index = 1; + optional uint64 explicit_hash_key_index = 2; + required bytes data = 3; + repeated Tag tags = 4; +} + +message AggregatedRecord { + repeated string partition_key_table = 1; + repeated string explicit_hash_key_table = 2; + repeated Record records = 3; +} diff --git a/amazon-kinesis-client/src/main/proto/proto.lock b/amazon-kinesis-client/src/main/proto/proto.lock new file mode 100644 index 00000000..1104f4d4 --- /dev/null +++ b/amazon-kinesis-client/src/main/proto/proto.lock @@ -0,0 +1,78 @@ +{ + "definitions": [ + { + "protopath": "messages.proto", + "def": { + "messages": [ + { + "name": "Tag", + "fields": [ + { + "id": 1, + "name": "key", + "type": "string" + }, + { + "id": 2, + "name": "value", + "type": "string" + } + ] + }, + { + "name": "Record", + "fields": [ + { + "id": 1, + "name": "partition_key_index", + "type": "uint64" + }, + { + "id": 2, + "name": "explicit_hash_key_index", + "type": "uint64" + }, + { + "id": 3, + "name": "data", + "type": "bytes" + }, + { + "id": 4, + "name": "tags", + "type": "Tag", + "is_repeated": true + } + ] + }, + { + "name": "AggregatedRecord", + "fields": [ + { + "id": 1, + "name": "partition_key_table", + "type": "string", + "is_repeated": true + }, + { + "id": 2, + "name": "explicit_hash_key_table", + "type": "string", + "is_repeated": true + }, + { + "id": 3, + "name": "records", + "type": "Record", + "is_repeated": true + } + ] + } + ], + "package": { + "name": "software.amazon.kinesis.retrieval.kpl" + } + } + } + ] +} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java new file mode 100644 index 00000000..bca8284b --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java @@ -0,0 +1,350 @@ +package software.amazon.kinesis.application; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.RandomStringUtils; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import 
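messages.proto above vendors just the subset of the KPL schema that deaggregation needs, and proto.lock pins it against incompatible edits. For context: a KPL-aggregated Kinesis record is framed as a 4-byte magic prefix, the serialized AggregatedRecord, and a trailing 16-byte MD5 of the protobuf body. A sketch of how the generated Messages class could parse such a payload (an illustration of the framing, not KCL's AggregatorUtil):

```java
import java.security.MessageDigest;
import java.util.Arrays;

import software.amazon.kinesis.retrieval.kpl.Messages.AggregatedRecord;

final class KplDeaggregationSketch {
    private static final byte[] MAGIC = {(byte) 0xF3, (byte) 0x89, (byte) 0x9A, (byte) 0xC2};

    // Returns the parsed AggregatedRecord, or null if the payload is a plain record.
    static AggregatedRecord parseOrNull(byte[] payload) throws Exception {
        if (payload.length < MAGIC.length + 16
                || !Arrays.equals(Arrays.copyOf(payload, MAGIC.length), MAGIC)) {
            return null;
        }
        byte[] body = Arrays.copyOfRange(payload, MAGIC.length, payload.length - 16);
        byte[] digest = Arrays.copyOfRange(payload, payload.length - 16, payload.length);
        if (!Arrays.equals(MessageDigest.getInstance("MD5").digest(body), digest)) {
            return null; // checksum mismatch: treat as unaggregated
        }
        return AggregatedRecord.parseFrom(body);
    }
}
```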
software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse; +import software.amazon.awssdk.services.kinesis.model.PutRecordRequest; +import software.amazon.awssdk.services.kinesis.model.ScalingType; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountRequest; +import software.amazon.awssdk.services.kinesis.model.UpdateShardCountResponse; +import software.amazon.kinesis.checkpoint.CheckpointConfig; +import software.amazon.kinesis.common.ConfigsBuilder; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.RetrievalMode; +import software.amazon.kinesis.coordinator.CoordinatorConfig; +import software.amazon.kinesis.coordinator.Scheduler; +import software.amazon.kinesis.leases.LeaseManagementConfig; +import software.amazon.kinesis.lifecycle.LifecycleConfig; +import software.amazon.kinesis.metrics.MetricsConfig; +import software.amazon.kinesis.processor.ProcessorConfig; +import software.amazon.kinesis.retrieval.RetrievalConfig; +import software.amazon.kinesis.utils.LeaseTableManager; +import software.amazon.kinesis.utils.RecordValidationStatus; +import software.amazon.kinesis.utils.ReshardOptions; +import software.amazon.kinesis.utils.StreamExistenceManager; + +import static org.junit.Assume.assumeTrue; + +@Slf4j +public class TestConsumer { + public final KCLAppConfig consumerConfig; + public final Region region; + public final List streamNames; + public final KinesisAsyncClient kinesisClient; + public final KinesisAsyncClient kinesisClientForStreamOwner; + private MetricsConfig metricsConfig; + private RetrievalConfig retrievalConfig; + private CheckpointConfig checkpointConfig; + private CoordinatorConfig coordinatorConfig; + private LeaseManagementConfig leaseManagementConfig; + private LifecycleConfig lifecycleConfig; + private ProcessorConfig processorConfig; + private Scheduler scheduler; + private ScheduledExecutorService producerExecutor; + private ScheduledFuture producerFuture; + private ScheduledExecutorService consumerExecutor; + private ScheduledFuture consumerFuture; + private DynamoDbAsyncClient dynamoClient; + private final ObjectMapper mapper = new ObjectMapper(); + public int successfulPutRecords = 0; + public BigInteger payloadCounter = new BigInteger("0"); + + public TestConsumer(KCLAppConfig consumerConfig) throws Exception { + this.consumerConfig = consumerConfig; + this.region = consumerConfig.getRegion(); + this.streamNames = consumerConfig.getStreamNames(); + this.kinesisClientForStreamOwner = consumerConfig.buildAsyncKinesisClientForStreamOwner(); + this.kinesisClient = consumerConfig.buildAsyncKinesisClientForConsumer(); + this.dynamoClient = consumerConfig.buildAsyncDynamoDbClient(); + } + + public void run() throws Exception { + + // Skip cross account tests if no cross account credentials are provided + if (consumerConfig.isCrossAccount()) { + assumeTrue(consumerConfig.getCrossAccountCredentialsProvider() != null); + } + + final StreamExistenceManager streamExistenceManager = new StreamExistenceManager(this.consumerConfig); + final LeaseTableManager leaseTableManager = new LeaseTableManager(this.dynamoClient); + + // Clean up any old streams or lease tables left in test environment + cleanTestResources(streamExistenceManager, leaseTableManager); + + // Check if stream is 
created. If not, create it + streamExistenceManager.checkStreamsAndCreateIfNecessary(); + Map streamToConsumerArnsMap = streamExistenceManager.createCrossAccountConsumerIfNecessary(); + + startProducer(); + setUpConsumerResources(streamToConsumerArnsMap); + + try { + startConsumer(); + + // Sleep to allow the producer/consumer to run and then end the test case. + // If non-reshard sleep 3 minutes, else sleep 4 minutes per scale. + final int sleepMinutes = (consumerConfig.getReshardFactorList() == null) + ? 3 + : (4 * consumerConfig.getReshardFactorList().size()); + Thread.sleep(TimeUnit.MINUTES.toMillis(sleepMinutes)); + + // Stops sending dummy data. + stopProducer(); + + // Wait a few seconds for the last few records to be processed + Thread.sleep(TimeUnit.SECONDS.toMillis(10)); + + // Finishes processing current batch of data already received from Kinesis before shutting down. + awaitConsumerFinish(); + + // Validate processed data + validateRecordProcessor(); + + } catch (Exception e) { + // Test Failed. Clean up resources and then throw exception. + log.info("----------Test Failed: Cleaning up resources------------"); + throw e; + } finally { + // Clean up resources created + deleteResources(streamExistenceManager, leaseTableManager); + } + } + + private void cleanTestResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) + throws Exception { + log.info("----------Before starting, Cleaning test environment----------"); + log.info("----------Deleting all lease tables in account----------"); + leaseTableManager.deleteAllResource(); + log.info("----------Finished deleting all lease tables-------------"); + + log.info("----------Deleting all streams in account----------"); + streamExistenceManager.deleteAllResource(); + log.info("----------Finished deleting all streams-------------"); + } + + private void startProducer() { + this.producerExecutor = Executors.newSingleThreadScheduledExecutor(); + this.producerFuture = producerExecutor.scheduleAtFixedRate(this::publishRecord, 10, 1, TimeUnit.SECONDS); + + // Reshard logic if required for the test + if (consumerConfig.getReshardFactorList() != null) { + log.info("----Reshard Config found: {}", consumerConfig.getReshardFactorList()); + + for (String streamName : consumerConfig.getStreamNames()) { + final StreamScaler streamScaler = new StreamScaler( + kinesisClientForStreamOwner, streamName, consumerConfig.getReshardFactorList(), consumerConfig); + + // Schedule the stream scales 4 minutes apart with 2 minute starting delay + for (int i = 0; i < consumerConfig.getReshardFactorList().size(); i++) { + producerExecutor.schedule(streamScaler, (4 * i) + 2, TimeUnit.MINUTES); + } + } + } + } + + private void setUpConsumerResources(Map streamToConsumerArnsMap) throws Exception { + // Setup configuration of KCL (including DynamoDB and CloudWatch) + final ConfigsBuilder configsBuilder = consumerConfig.getConfigsBuilder(streamToConsumerArnsMap); + + // For polling mode in both CAA and non CAA, set retrievalSpecificConfig to use PollingConfig + // For SingleStreamMode EFO CAA, must set the retrieval config to specify the consumerArn in FanoutConfig + // For MultiStream EFO CAA, the consumerArn can be set in StreamConfig + if (consumerConfig.getRetrievalMode().equals(RetrievalMode.POLLING)) { + retrievalConfig = consumerConfig.getRetrievalConfig(configsBuilder, null); + } else if (consumerConfig.isCrossAccount()) { + retrievalConfig = consumerConfig.getRetrievalConfig(configsBuilder, streamToConsumerArnsMap); + } 
else { + retrievalConfig = configsBuilder.retrievalConfig(); + } + + checkpointConfig = configsBuilder.checkpointConfig(); + coordinatorConfig = configsBuilder.coordinatorConfig(); + leaseManagementConfig = configsBuilder + .leaseManagementConfig() + .initialPositionInStream( + InitialPositionInStreamExtended.newInitialPosition(consumerConfig.getInitialPosition())) + .initialLeaseTableReadCapacity(50) + .initialLeaseTableWriteCapacity(50); + lifecycleConfig = configsBuilder.lifecycleConfig(); + processorConfig = configsBuilder.processorConfig(); + metricsConfig = configsBuilder.metricsConfig(); + + // Create Scheduler + this.scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); + } + + private void startConsumer() { + // Start record processing of dummy data + this.consumerExecutor = Executors.newSingleThreadScheduledExecutor(); + this.consumerFuture = consumerExecutor.schedule(scheduler, 0, TimeUnit.SECONDS); + } + + private void stopProducer() { + log.info("Cancelling producer and shutting down executor."); + if (producerFuture != null) { + producerFuture.cancel(false); + } + if (producerExecutor != null) { + producerExecutor.shutdown(); + } + } + + public void publishRecord() { + for (String streamName : consumerConfig.getStreamNames()) { + try { + final PutRecordRequest request = PutRecordRequest.builder() + .partitionKey(RandomStringUtils.randomAlphabetic(5, 20)) + .streamName(streamName) + .data(SdkBytes.fromByteBuffer(wrapWithCounter(5, payloadCounter))) // 1024 + // is 1 KB + .build(); + kinesisClientForStreamOwner.putRecord(request).get(); + + // Increment the payload counter if the putRecord call was successful + payloadCounter = payloadCounter.add(new BigInteger("1")); + successfulPutRecords += 1; + log.info( + "---------Record published for stream {}, successfulPutRecords is now: {}", + streamName, + successfulPutRecords); + } catch (InterruptedException e) { + log.info("Interrupted, assuming shutdown. ", e); + } catch (ExecutionException | RuntimeException e) { + log.error("Error during publish records", e); + } + } + } + + private ByteBuffer wrapWithCounter(int payloadSize, BigInteger payloadCounter) throws RuntimeException { + final byte[] returnData; + log.info("---------Putting record with data: {}", payloadCounter); + try { + returnData = mapper.writeValueAsBytes(payloadCounter); + } catch (Exception e) { + throw new RuntimeException("Error converting object to bytes: ", e); + } + return ByteBuffer.wrap(returnData); + } + + private void awaitConsumerFinish() throws Exception { + Future gracefulShutdownFuture = scheduler.startGracefulShutdown(); + log.info("Waiting up to 20 seconds for shutdown to complete."); + try { + gracefulShutdownFuture.get(20, TimeUnit.SECONDS); + } catch (InterruptedException e) { + log.info("Interrupted while waiting for graceful shutdown. 
Continuing."); + } catch (ExecutionException | TimeoutException e) { + scheduler.shutdown(); + } + log.info("Completed, shutting down now."); + } + + private void validateRecordProcessor() throws Exception { + log.info("The number of expected records is: {}", successfulPutRecords); + final RecordValidationStatus errorVal = + consumerConfig.getRecordValidator().validateRecords(successfulPutRecords); + if (errorVal != RecordValidationStatus.NO_ERROR) { + throw new RuntimeException( + "There was an error validating the records that were processed: " + errorVal.toString()); + } + log.info("---------Completed validation of processed records.---------"); + } + + private void deleteResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) + throws Exception { + log.info("-------------Start deleting streams.---------"); + for (String streamName : consumerConfig.getStreamNames()) { + log.info("Deleting stream {}", streamName); + streamExistenceManager.deleteResource(streamName); + } + log.info("---------Start deleting lease table.---------"); + leaseTableManager.deleteResource(consumerConfig.getApplicationName()); + log.info("---------Finished deleting resources.---------"); + } + + @Data + private static class StreamScaler implements Runnable { + private final KinesisAsyncClient client; + private final String streamName; + private final List scalingFactors; + private final KCLAppConfig consumerConfig; + private int scalingFactorIdx = 0; + private DescribeStreamSummaryRequest describeStreamSummaryRequest; + + private synchronized void scaleStream() throws InterruptedException, ExecutionException { + final DescribeStreamSummaryResponse response = + client.describeStreamSummary(describeStreamSummaryRequest).get(); + + final int openShardCount = response.streamDescriptionSummary().openShardCount(); + final int targetShardCount = scalingFactors.get(scalingFactorIdx).calculateShardCount(openShardCount); + + log.info( + "Scaling stream {} from {} shards to {} shards w/ scaling factor {}", + streamName, + openShardCount, + targetShardCount, + scalingFactors.get(scalingFactorIdx)); + + final UpdateShardCountRequest updateShardCountRequest = UpdateShardCountRequest.builder() + .streamName(streamName) + .targetShardCount(targetShardCount) + .scalingType(ScalingType.UNIFORM_SCALING) + .build(); + final UpdateShardCountResponse shardCountResponse = + client.updateShardCount(updateShardCountRequest).get(); + log.info("Executed shard scaling request. 
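awaitConsumerFinish above encodes a deadline-bounded graceful shutdown: ask the Scheduler to drain, give it a fixed budget, and force the issue only if that fails. Distilled into a sketch (Scheduler and startGracefulShutdown are the real KCL APIs; the 20-second budget mirrors the test harness):

```java
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import software.amazon.kinesis.coordinator.Scheduler;

final class ShutdownHelper {
    static void shutdownWithDeadline(Scheduler scheduler) throws InterruptedException {
        Future<Boolean> graceful = scheduler.startGracefulShutdown();
        try {
            graceful.get(20, TimeUnit.SECONDS); // wait for in-flight batches to checkpoint
        } catch (ExecutionException | TimeoutException e) {
            scheduler.shutdown(); // forced fallback
        }
    }
}
```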
Response Details : {}", shardCountResponse.toString()); + + scalingFactorIdx++; + } + + @Override + public void run() { + if (scalingFactors.size() == 0 || scalingFactorIdx >= scalingFactors.size()) { + log.info("No scaling factor found in list"); + return; + } + log.info("Starting stream scaling with params : {}", this); + + if (describeStreamSummaryRequest == null) { + describeStreamSummaryRequest = DescribeStreamSummaryRequest.builder() + .streamName(streamName) + .build(); + } + try { + scaleStream(); + } catch (InterruptedException | ExecutionException e) { + log.error("Caught error while scaling shards for stream", e); + } finally { + log.info("Reshard List State : {}", scalingFactors); + } + } + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java new file mode 100644 index 00000000..6d04afcf --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java @@ -0,0 +1,111 @@ +package software.amazon.kinesis.application; + +import java.nio.ByteBuffer; + +import lombok.extern.slf4j.Slf4j; +import org.slf4j.MDC; +import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.exceptions.InvalidStateException; +import software.amazon.kinesis.exceptions.ShutdownException; +import software.amazon.kinesis.lifecycle.events.InitializationInput; +import software.amazon.kinesis.lifecycle.events.LeaseLostInput; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.lifecycle.events.ShardEndedInput; +import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; +import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.retrieval.KinesisClientRecord; +import software.amazon.kinesis.utils.RecordValidatorQueue; + +/** + * Implement initialization and deletion of shards and shard record processing + */ +@Slf4j +public class TestRecordProcessor implements ShardRecordProcessor { + + private static final String SHARD_ID_MDC_KEY = "ShardId"; + + private StreamIdentifier streamIdentifier; + + private String shardId; + + private final RecordValidatorQueue recordValidator; + + public TestRecordProcessor(StreamIdentifier streamIdentifier, RecordValidatorQueue recordValidator) { + this.recordValidator = recordValidator; + this.streamIdentifier = streamIdentifier; + } + + @Override + public void initialize(InitializationInput initializationInput) { + shardId = initializationInput.shardId(); + MDC.put(SHARD_ID_MDC_KEY, shardId); + try { + log.info("Initializing @ Sequence: {}", initializationInput.extendedSequenceNumber()); + } finally { + MDC.remove(SHARD_ID_MDC_KEY); + } + } + + @Override + public void processRecords(ProcessRecordsInput processRecordsInput) { + MDC.put(SHARD_ID_MDC_KEY, shardId); + try { + log.info("Processing {} record(s)", processRecordsInput.records().size()); + + for (KinesisClientRecord kinesisRecord : processRecordsInput.records()) { + final String data = new String(asByteArray(kinesisRecord.data())); + log.info("Processing record pk for stream {}: {}", streamIdentifier.streamName(), data); + String recordValidatorKey = streamIdentifier.toString() + "-" + shardId; + recordValidator.add(recordValidatorKey, data); + } + + } catch (Throwable t) { + log.error("Caught throwable while processing records. 
Aborting.", t); + Runtime.getRuntime().halt(1); + } finally { + MDC.remove(SHARD_ID_MDC_KEY); + } + } + + public static byte[] asByteArray(ByteBuffer buf) { + byte[] bytes = new byte[buf.remaining()]; + buf.get(bytes); + return bytes; + } + + @Override + public void leaseLost(LeaseLostInput leaseLostInput) { + MDC.put(SHARD_ID_MDC_KEY, shardId); + try { + log.info("Lost lease, so terminating."); + } finally { + MDC.remove(SHARD_ID_MDC_KEY); + } + } + + @Override + public void shardEnded(ShardEndedInput shardEndedInput) { + MDC.put(SHARD_ID_MDC_KEY, shardId); + try { + log.info("Reached shard end checkpointing."); + shardEndedInput.checkpointer().checkpoint(); + } catch (ShutdownException | InvalidStateException e) { + log.error("Exception while checkpointing at shard end. Giving up.", e); + } finally { + MDC.remove(SHARD_ID_MDC_KEY); + } + } + + @Override + public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { + MDC.put(SHARD_ID_MDC_KEY, shardId); + try { + log.info("Scheduler is shutting down, checkpointing."); + shutdownRequestedInput.checkpointer().checkpoint(); + } catch (ShutdownException | InvalidStateException e) { + log.error("Exception while checkpointing at requested shutdown. Giving up.", e); + } finally { + MDC.remove(SHARD_ID_MDC_KEY); + } + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java new file mode 100644 index 00000000..e36d9ba2 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java @@ -0,0 +1,25 @@ +package software.amazon.kinesis.application; + +import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; +import software.amazon.kinesis.utils.RecordValidatorQueue; + +public class TestRecordProcessorFactory implements ShardRecordProcessorFactory { + + private final RecordValidatorQueue recordValidator; + + public TestRecordProcessorFactory(RecordValidatorQueue recordValidator) { + this.recordValidator = recordValidator; + } + + @Override + public ShardRecordProcessor shardRecordProcessor() { + return new TestRecordProcessor(null, this.recordValidator); + } + + @Override + public ShardRecordProcessor shardRecordProcessor(StreamIdentifier streamIdentifier) { + return new TestRecordProcessor(streamIdentifier, this.recordValidator); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java index b823c8e3..b618aa7a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java @@ -17,7 +17,6 @@ package software.amazon.kinesis.checkpoint; import org.junit.Assert; import org.junit.Before; import org.junit.Test; - import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; @@ -37,20 +36,20 @@ public class CheckpointerTest { @Test public final void testInitialSetCheckpoint() throws Exception { - String sequenceNumber = "1"; + String sequenceNumber = "1"; String shardId = "myShardId"; - ExtendedSequenceNumber extendedSequenceNumber = 
new ExtendedSequenceNumber(sequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); } - + @Test public final void testAdvancingSetCheckpoint() throws Exception { String shardId = "myShardId"; for (Integer i = 0; i < 10; i++) { - String sequenceNumber = i.toString(); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); + String sequenceNumber = i.toString(); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); @@ -67,10 +66,11 @@ public class CheckpointerTest { String checkpointValue = "12345"; String shardId = "testShardId-1"; String concurrencyToken = "token-1"; - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } @@ -86,11 +86,14 @@ public class CheckpointerTest { checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } - @Test public final void testInitialPrepareCheckpointWithApplicationState() throws Exception { String sequenceNumber = "1"; @@ -101,13 +104,18 @@ public class CheckpointerTest { checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken, - applicationState); + checkpoint.prepareCheckpoint( + shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken, applicationState); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - 
Assert.assertEquals(applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); } @Test @@ -122,8 +130,12 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedSequenceNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } } @@ -138,12 +150,17 @@ public class CheckpointerTest { for (Integer i = 0; i < 10; i++) { String sequenceNumber = i.toString(); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken, - applicationState); + checkpoint.prepareCheckpoint( + shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken, applicationState); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - Assert.assertEquals(applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedSequenceNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); } } @@ -158,20 +175,28 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(checkpointValue); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); // prepare checkpoint ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, 
checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); // do checkpoint checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } @@ -187,21 +212,31 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(checkpointValue); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); // prepare checkpoint ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken, applicationState); + checkpoint.prepareCheckpoint( + shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken, applicationState); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - Assert.assertEquals(applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); // do checkpoint checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java index 8f6e165d..635678f0 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java +++ 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java @@ -17,12 +17,10 @@ package software.amazon.kinesis.checkpoint; import java.util.HashMap; import java.util.Map; -import software.amazon.kinesis.exceptions.KinesisClientLibException; +import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import lombok.extern.slf4j.Slf4j; - /** * Everything is stored in memory and there is no fault-tolerance. */ @@ -39,8 +37,7 @@ public class InMemoryCheckpointer implements Checkpointer { * {@inheritDoc} */ @Override - public void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken) - throws KinesisClientLibException { + public void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken) { checkpoints.put(leaseKey, checkpointValue); flushpoints.put(leaseKey, checkpointValue); pendingCheckpoints.remove(leaseKey); @@ -49,33 +46,35 @@ public class InMemoryCheckpointer implements Checkpointer { if (log.isDebugEnabled()) { log.debug("shardId: {} checkpoint: {}", leaseKey, checkpointValue); } - } /** * {@inheritDoc} */ @Override - public ExtendedSequenceNumber getCheckpoint(String leaseKey) throws KinesisClientLibException { + public ExtendedSequenceNumber getCheckpoint(String leaseKey) { ExtendedSequenceNumber checkpoint = flushpoints.get(leaseKey); log.debug("checkpoint shardId: {} checkpoint: {}", leaseKey, checkpoint); return checkpoint; } @Override - public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) - throws KinesisClientLibException { + public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) { prepareCheckpoint(leaseKey, pendingCheckpoint, concurrencyToken, null); } @Override - public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) throws KinesisClientLibException { + public void prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + String concurrencyToken, + byte[] pendingCheckpointState) { pendingCheckpoints.put(leaseKey, pendingCheckpoint); pendingCheckpointStates.put(leaseKey, pendingCheckpointState); } @Override - public Checkpoint getCheckpointObject(String leaseKey) throws KinesisClientLibException { + public Checkpoint getCheckpointObject(String leaseKey) { ExtendedSequenceNumber checkpoint = flushpoints.get(leaseKey); ExtendedSequenceNumber pendingCheckpoint = pendingCheckpoints.get(leaseKey); byte[] pendingCheckpointState = pendingCheckpointStates.get(leaseKey); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java index ab23e0b4..d2ab2601 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java @@ -14,11 +14,11 @@ */ package software.amazon.kinesis.checkpoint; +import java.util.Optional; + import org.junit.Before; import org.junit.Test; -import java.util.Optional; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static 
org.junit.Assert.assertThat; @@ -32,7 +32,6 @@ public class SequenceNumberValidatorTest { validator = new SequenceNumberValidator(); } - @Test public void matchingSequenceNumberTest() { String sequenceNumber = "49587497311274533994574834252742144236107130636007899138"; @@ -44,7 +43,8 @@ public class SequenceNumberValidatorTest { Optional shardId = validator.shardIdFor(sequenceNumber); assertThat(shardId, equalTo(Optional.of(expectedShardId))); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.of(true))); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.of(true))); } @Test @@ -58,7 +58,8 @@ public class SequenceNumberValidatorTest { Optional shardId = validator.shardIdFor(sequenceNumber); assertThat(shardId, not(equalTo(invalidShardId))); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, invalidShardId), equalTo(Optional.of(false))); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, invalidShardId), equalTo(Optional.of(false))); } @Test @@ -72,7 +73,8 @@ public class SequenceNumberValidatorTest { Optional shardId = validator.shardIdFor(sequenceNumber); assertThat(shardId, equalTo(Optional.empty())); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); } @Test @@ -83,7 +85,8 @@ public class SequenceNumberValidatorTest { assertThat(validator.versionFor(sequenceNumber), equalTo(Optional.empty())); assertThat(validator.shardIdFor(sequenceNumber), equalTo(Optional.empty())); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); } @Test @@ -94,8 +97,7 @@ public class SequenceNumberValidatorTest { assertThat(validator.versionFor(sequenceNumber), equalTo(Optional.empty())); assertThat(validator.shardIdFor(sequenceNumber), equalTo(Optional.empty())); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); } - - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java index e51616d9..523324d2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.checkpoint; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.kinesis.processor.PreparedCheckpointer; +import software.amazon.kinesis.processor.RecordProcessorCheckpointer; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class ShardPreparedCheckpointerTest { @@ -60,4 +60,4 @@ public class 
ShardPreparedCheckpointerTest { // nothing happens here checkpointer.checkpoint(); } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java index 2ff82004..a198dcef 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java @@ -14,11 +14,6 @@ */ package software.amazon.kinesis.checkpoint; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -28,13 +23,17 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.Record; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.processor.PreparedCheckpointer; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + /** * */ @@ -88,62 +87,62 @@ public class ShardShardRecordProcessorCheckpointerTest { /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(Record record)}. - */ + */ @Test public final void testCheckpointRecord() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = + ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025"); - Record record = makeRecord("5025"); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025"); + Record record = makeRecord("5025"); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); processingCheckpointer.checkpoint(record); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - + /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(Record record)}. 
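 * <p>Illustrative sketch of the pattern these record-based tests exercise (names and values
 * mirror the test body; this note is explanatory only, not part of the tested behavior):
 * <pre>{@code
 * processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
 * processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("5030"));
 * processingCheckpointer.checkpoint(record); // persists the record's sequence number
 * }</pre>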
*/ @Test public final void testCheckpointSubRecord() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = + ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); - Record record = makeRecord("5030"); - //UserRecord subRecord = new UserRecord(record); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); + Record record = makeRecord("5030"); + // UserRecord subRecord = new UserRecord(record); + processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); processingCheckpointer.checkpoint(record); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - + /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber)}. */ @Test public final void testCheckpointSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = + ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); + processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); processingCheckpointer.checkpoint("5035"); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - + /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}. */ @Test public final void testCheckpointExtendedSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = + ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); + processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); processingCheckpointer.checkpoint("5040", 0); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } @@ -162,7 +161,6 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - /** * Test method for * {@link ShardRecordProcessorCheckpointer#prepareCheckpoint()}. 
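 * <p>Sketch of the two-phase flow under test (illustrative only):
 * <pre>{@code
 * PreparedCheckpointer prepared = processingCheckpointer.prepareCheckpoint();
 * // the pending checkpoint is recorded while the committed checkpoint stays unchanged
 * prepared.checkpoint(); // commits the pending sequence number
 * }</pre>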
@@ -231,7 +229,7 @@ public class ShardShardRecordProcessorCheckpointerTest { processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); Record record = makeRecord("5030"); - //UserRecord subRecord = new UserRecord(record); + // UserRecord subRecord = new UserRecord(record); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(record); assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); @@ -252,7 +250,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testPrepareCheckpointSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); @@ -275,7 +274,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testPrepareCheckpointExtendedSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); @@ -297,11 +297,13 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testPrepareCheckpointAtShardEnd() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = ExtendedSequenceNumber.SHARD_END; processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(ExtendedSequenceNumber.SHARD_END.sequenceNumber()); + PreparedCheckpointer preparedCheckpoint = + processingCheckpointer.prepareCheckpoint(ExtendedSequenceNumber.SHARD_END.sequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); @@ -314,13 +316,13 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); } - /** * Test that having multiple outstanding prepared checkpointers works if they are redeemed in the right order. 
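 * <p>Sketch of in-order redemption, with hypothetical sequence numbers:
 * <pre>{@code
 * PreparedCheckpointer first = processingCheckpointer.prepareCheckpoint("6010");
 * PreparedCheckpointer second = processingCheckpointer.prepareCheckpoint("6020");
 * first.checkpoint();  // commits 6010
 * second.checkpoint(); // commits 6020 after 6010
 * }</pre>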
*/ @Test public final void testMultipleOutstandingCheckpointersHappyCase() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("6040")); @@ -351,7 +353,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testMultipleOutstandingCheckpointersOutOfOrder() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("7040")); @@ -397,14 +400,15 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpointer.largestPermittedCheckpointValue(), equalTo(sequenceNumber)); } - /* + /** * This test is a mixed test of checking some basic functionality of checkpointing at a sequence number and making * sure certain bounds checks and validations are being performed inside the checkpointer to prevent clients from * checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be checkpointing */ @Test public final void testClientSpecifiedCheckpoint() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); // Several checkpoints we're gonna hit ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); @@ -426,58 +430,71 @@ public class ShardShardRecordProcessorCheckpointerTest { } // advance to first - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); // advance to second - processingCheckpointer.checkpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = - { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed - firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number - tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer - lastSequenceNumberOfShard, // Just another big value that we will use later - null, // Not a valid sequence 
number - new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string - ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max - ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value - ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value + ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = { + tooSmall, // Shouldn't be able to move before the first value we ever checkpointed + firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number + tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer + lastSequenceNumberOfShard, // Just another big value that we will use later + null, // Not a valid sequence number + new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string + ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max + ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value + ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value }; for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { try { - processingCheckpointer.checkpoint(badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); + processingCheckpointer.checkpoint( + badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); fail("checkpointing at bad or out of order sequence didn't throw exception"); } catch (IllegalArgumentException e) { } catch (NullPointerException e) { - + } - assertThat("Checkpoint value should not have changed", checkpoint.getCheckpoint(shardId), + assertThat( + "Checkpoint value should not have changed", + checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - assertThat("Last checkpoint value should not have changed", processingCheckpointer.lastCheckpointValue(), + assertThat( + "Last checkpoint value should not have changed", + processingCheckpointer.lastCheckpointValue(), equalTo(secondSequenceNumber)); - assertThat("Largest sequence number should not have changed", - processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber)); + assertThat( + "Largest sequence number should not have changed", + processingCheckpointer.largestPermittedCheckpointValue(), + equalTo(thirdSequenceNumber)); } // advance to third number - processingCheckpointer.checkpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(thirdSequenceNumber)); // Testing a feature that prevents checkpointing at SHARD_END twice processingCheckpointer.largestPermittedCheckpointValue(lastSequenceNumberOfShard); processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer.largestPermittedCheckpointValue()); processingCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - processingCheckpointer.checkpoint(lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); + processingCheckpointer.checkpoint( + lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); assertThat( "Checkpointing at the sequence number at the end of a shard should be the same as checkpointing at SHARD_END", - processingCheckpointer.lastCheckpointValue(),
equalTo(ExtendedSequenceNumber.SHARD_END)); + processingCheckpointer.lastCheckpointValue(), + equalTo(ExtendedSequenceNumber.SHARD_END)); } - /* + /** * This test is a mixed test of checking some basic functionality of two phase checkpointing at a sequence number * and making sure certain bounds checks and validations are being performed inside the checkpointer to prevent * clients from checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be @@ -485,7 +502,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testClientSpecifiedTwoPhaseCheckpoint() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); // Several checkpoints we're gonna hit ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); @@ -514,12 +532,13 @@ public class ShardShardRecordProcessorCheckpointerTest { } // advance to first - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); // prepare checkpoint at initial checkpoint value - PreparedCheckpointer doesNothingPreparedCheckpoint = - processingCheckpointer.prepareCheckpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + PreparedCheckpointer doesNothingPreparedCheckpoint = processingCheckpointer.prepareCheckpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(doesNothingPreparedCheckpoint instanceof DoesNothingPreparedCheckpointer, equalTo(true)); assertThat(doesNothingPreparedCheckpoint.pendingCheckpoint(), equalTo(firstSequenceNumber)); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); @@ -533,64 +552,80 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); // advance to second - processingCheckpointer.prepareCheckpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(secondSequenceNumber)); - processingCheckpointer.checkpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = - { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed - firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number - tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer - lastSequenceNumberOfShard, // Just another big value that we will use later - null, // Not a valid sequence number - new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't 
checkpoint at non-numeric string - ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max - ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value - ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value - }; + ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = { + tooSmall, // Shouldn't be able to move before the first value we ever checkpointed + firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number + tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer + lastSequenceNumberOfShard, // Just another big value that we will use later + null, // Not a valid sequence number + new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string + ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max + ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value + ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value + }; for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { try { - processingCheckpointer.prepareCheckpoint(badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); fail("checkpointing at bad or out of order sequence didn't throw exception"); } catch (IllegalArgumentException e) { } catch (NullPointerException e) { } - assertThat("Checkpoint value should not have changed", checkpoint.getCheckpoint(shardId), + assertThat( + "Checkpoint value should not have changed", + checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - assertThat("Last checkpoint value should not have changed", processingCheckpointer.lastCheckpointValue(), + assertThat( + "Last checkpoint value should not have changed", + processingCheckpointer.lastCheckpointValue(), equalTo(secondSequenceNumber)); - assertThat("Largest sequence number should not have changed", - processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber)); + assertThat( + "Largest sequence number should not have changed", + processingCheckpointer.largestPermittedCheckpointValue(), + equalTo(thirdSequenceNumber)); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } // advance to third number - processingCheckpointer.prepareCheckpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(thirdSequenceNumber)); - processingCheckpointer.checkpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(thirdSequenceNumber)); // Testing a feature that prevents checkpointing at SHARD_END twice processingCheckpointer.largestPermittedCheckpointValue(lastSequenceNumberOfShard); processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer.largestPermittedCheckpointValue()); processingCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - 
processingCheckpointer.prepareCheckpoint(lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); assertThat( "Preparing a checkpoint at the sequence number at the end of a shard should be the same as preparing a checkpoint at SHARD_END", - checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(ExtendedSequenceNumber.SHARD_END)); + checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), + equalTo(ExtendedSequenceNumber.SHARD_END)); } private enum CheckpointAction { - NONE, NO_SEQUENCE_NUMBER, WITH_SEQUENCE_NUMBER; + NONE, + NO_SEQUENCE_NUMBER, + WITH_SEQUENCE_NUMBER; } private enum CheckpointerType { - CHECKPOINTER, PREPARED_CHECKPOINTER, PREPARE_THEN_CHECKPOINTER; + CHECKPOINTER, + PREPARED_CHECKPOINTER, + PREPARE_THEN_CHECKPOINTER; } /** @@ -601,11 +636,11 @@ public class ShardShardRecordProcessorCheckpointerTest { * * @throws Exception */ - @SuppressWarnings("serial") @Test public final void testMixedCheckpointCalls() throws Exception { for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.CHECKPOINTER); } } @@ -617,11 +652,11 @@ public class ShardShardRecordProcessorCheckpointerTest { * * @throws Exception */ - @SuppressWarnings("serial") @Test public final void testMixedTwoPhaseCheckpointCalls() throws Exception { for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.PREPARED_CHECKPOINTER); } } @@ -638,13 +673,15 @@ public class ShardShardRecordProcessorCheckpointerTest { @Test public final void testMixedTwoPhaseCheckpointCalls2() throws Exception { for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.PREPARE_THEN_CHECKPOINTER); } } private List> getMixedCallsTestPlan() { - List> testPlans = new ArrayList>(); + List> testPlans = + new ArrayList>(); /* * Simulate a scenario where the checkpointer is created at "latest".
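 * For illustration, one plan entry such as ("7010", WITH_SEQUENCE_NUMBER) (hypothetical value)
 * makes the test helper advance the largest permitted checkpoint value and then checkpoint,
 * either directly, via a prepared checkpointer, or prepare-then-checkpoint, per CheckpointerType:
 *   processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("7010"));
 *   processingCheckpointer.checkpoint("7010");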
@@ -716,9 +753,11 @@ public class ShardShardRecordProcessorCheckpointerTest { * A map describing which checkpoint value to set in the checkpointer, and what action to take * @throws Exception */ - private void testMixedCheckpointCalls(ShardRecordProcessorCheckpointer processingCheckpointer, + private void testMixedCheckpointCalls( + ShardRecordProcessorCheckpointer processingCheckpointer, LinkedHashMap checkpointValueAndAction, - CheckpointerType checkpointerType) throws Exception { + CheckpointerType checkpointerType) + throws Exception { for (Entry entry : checkpointValueAndAction.entrySet()) { PreparedCheckpointer preparedCheckpoint = null; @@ -726,61 +765,68 @@ public class ShardShardRecordProcessorCheckpointerTest { if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) { // Before shard end, we will pretend to do what we expect the shutdown task to do - processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer - .largestPermittedCheckpointValue()); + processingCheckpointer.sequenceNumberAtShardEnd( + processingCheckpointer.largestPermittedCheckpointValue()); } // Advance the largest checkpoint and check that it is updated. processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey())); - assertThat("Expected the largest checkpoint value to be updated after setting it", + assertThat( + "Expected the largest checkpoint value to be updated after setting it", processingCheckpointer.largestPermittedCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); switch (entry.getValue()) { - case NONE: - // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as - // when this block started then continue to the next instruction - assertThat("Expected the last checkpoint value to stay the same if we didn't checkpoint", - processingCheckpointer.lastCheckpointValue(), equalTo(lastCheckpointValue)); - continue; - case NO_SEQUENCE_NUMBER: - switch (checkpointerType) { - case CHECKPOINTER: - processingCheckpointer.checkpoint(); - break; - case PREPARED_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - preparedCheckpoint.checkpoint(); - case PREPARE_THEN_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - processingCheckpointer.checkpoint( - preparedCheckpoint.pendingCheckpoint().sequenceNumber(), - preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); - } - break; - case WITH_SEQUENCE_NUMBER: - switch (checkpointerType) { - case CHECKPOINTER: - processingCheckpointer.checkpoint(entry.getKey()); - break; - case PREPARED_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); - preparedCheckpoint.checkpoint(); - case PREPARE_THEN_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); - processingCheckpointer.checkpoint( - preparedCheckpoint.pendingCheckpoint().sequenceNumber(), - preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); - } - break; + case NONE: + // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as + // when this block started then continue to the next instruction + assertThat( + "Expected the last checkpoint value to stay the same if we didn't checkpoint", + processingCheckpointer.lastCheckpointValue(), + equalTo(lastCheckpointValue)); + continue; + case NO_SEQUENCE_NUMBER: + switch (checkpointerType) { + case CHECKPOINTER: + processingCheckpointer.checkpoint(); + break; + 
case PREPARED_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); + preparedCheckpoint.checkpoint(); + case PREPARE_THEN_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); + processingCheckpointer.checkpoint( + preparedCheckpoint.pendingCheckpoint().sequenceNumber(), + preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); + } + break; + case WITH_SEQUENCE_NUMBER: + switch (checkpointerType) { + case CHECKPOINTER: + processingCheckpointer.checkpoint(entry.getKey()); + break; + case PREPARED_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); + preparedCheckpoint.checkpoint(); + case PREPARE_THEN_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); + processingCheckpointer.checkpoint( + preparedCheckpoint.pendingCheckpoint().sequenceNumber(), + preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); + } + break; } // We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date - assertThat("Expected the last checkpoint value to change after checkpointing", - processingCheckpointer.lastCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat("Expected the largest checkpoint value to remain the same since the last set", + assertThat( + "Expected the last checkpoint value to change after checkpointing", + processingCheckpointer.lastCheckpointValue(), + equalTo(new ExtendedSequenceNumber(entry.getKey()))); + assertThat( + "Expected the largest checkpoint value to remain the same since the last set", processingCheckpointer.largestPermittedCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); assertThat(checkpoint.getCheckpoint(shardId), equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), + assertThat( + checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); } @@ -800,7 +846,8 @@ public class ShardShardRecordProcessorCheckpointerTest { @Test public final void testSetMetricsScopeDuringCheckpointing() throws Exception { // First call to checkpoint - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019"); processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java index 8ea8f818..1343285e 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java @@ -18,14 +18,12 @@ package software.amazon.kinesis.common; import java.util.Arrays; import java.util.Optional; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.mockito.Mockito.mock; - -import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.MockitoAnnotations; +import 
org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -34,6 +32,11 @@ import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.Mockito.mock; + +@RunWith(MockitoJUnitRunner.class) public class ConfigsBuilderTest { @Mock @@ -51,39 +54,82 @@ public class ConfigsBuilderTest { private static final String APPLICATION_NAME = ConfigsBuilderTest.class.getSimpleName(); private static final String WORKER_IDENTIFIER = "worker-id"; - @Before - public void setUp() { - MockitoAnnotations.initMocks(this); + @Test + public void testSingleStreamTrackerConstruction() { + final String streamName = "single-stream"; + final Arn streamArn = createArn(streamName); + + for (final ConfigsBuilder cb : Arrays.asList( + createConfig(streamName), + createConfig(new SingleStreamTracker(streamName)), + createConfig(streamArn), + createConfig(new SingleStreamTracker(streamArn)))) { + assertEquals(Optional.empty(), cb.appStreamTracker().left()); + assertEquals(streamName, cb.appStreamTracker().right().get()); + assertEquals( + streamName, + cb.streamTracker() + .streamConfigList() + .get(0) + .streamIdentifier() + .streamName()); + assertFalse(cb.streamTracker().isMultiStream()); + } } @Test - public void testTrackerConstruction() { - final String streamName = "single-stream"; - final ConfigsBuilder configByName = createConfig(streamName); - final ConfigsBuilder configBySingleTracker = createConfig(new SingleStreamTracker(streamName)); - - for (final ConfigsBuilder cb : Arrays.asList(configByName, configBySingleTracker)) { - assertEquals(Optional.empty(), cb.appStreamTracker().left()); - assertEquals(streamName, cb.appStreamTracker().right().get()); - assertEquals(streamName, cb.streamTracker().streamConfigList().get(0).streamIdentifier().streamName()); - assertFalse(cb.streamTracker().isMultiStream()); - } - + public void testMultiStreamTrackerConstruction() { final StreamTracker mockMultiStreamTracker = mock(MultiStreamTracker.class); final ConfigsBuilder configByMultiTracker = createConfig(mockMultiStreamTracker); assertEquals(Optional.empty(), configByMultiTracker.appStreamTracker().right()); - assertEquals(mockMultiStreamTracker, configByMultiTracker.appStreamTracker().left().get()); + assertEquals( + mockMultiStreamTracker, + configByMultiTracker.appStreamTracker().left().get()); assertEquals(mockMultiStreamTracker, configByMultiTracker.streamTracker()); } private ConfigsBuilder createConfig(String streamName) { - return new ConfigsBuilder(streamName, APPLICATION_NAME, mockKinesisClient, mockDynamoClient, - mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory); + // intentional invocation of constructor where streamName is a String + return new ConfigsBuilder( + streamName, + APPLICATION_NAME, + mockKinesisClient, + mockDynamoClient, + mockCloudWatchClient, + WORKER_IDENTIFIER, + mockShardProcessorFactory); + } + + private ConfigsBuilder createConfig(Arn streamArn) { + // intentional invocation of constructor where streamArn is an Arn + return new ConfigsBuilder( + streamArn, + 
APPLICATION_NAME, + mockKinesisClient, + mockDynamoClient, + mockCloudWatchClient, + WORKER_IDENTIFIER, + mockShardProcessorFactory); } private ConfigsBuilder createConfig(StreamTracker streamTracker) { - return new ConfigsBuilder(streamTracker, APPLICATION_NAME, mockKinesisClient, mockDynamoClient, - mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory); + return new ConfigsBuilder( + streamTracker, + APPLICATION_NAME, + mockKinesisClient, + mockDynamoClient, + mockCloudWatchClient, + WORKER_IDENTIFIER, + mockShardProcessorFactory); } -} \ No newline at end of file + private static Arn createArn(String streamName) { + return Arn.builder() + .partition("aws") + .service("kinesis") + .region(Region.US_EAST_1.id()) + .accountId("123456789012") + .resource("stream/" + streamName) + .build(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java index 39991b78..8ed46d0b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java @@ -17,15 +17,15 @@ package software.amazon.kinesis.common; import java.util.function.Function; -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; - import org.junit.Test; import software.amazon.awssdk.utils.Either; import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + public class DeprecationUtilsTest { @Test @@ -41,5 +41,4 @@ public class DeprecationUtilsTest { public void testUnsupportedStreamTrackerConversion() { DeprecationUtils.convert(mock(StreamTracker.class), Function.identity()); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java index 558687cd..1911d537 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java @@ -14,6 +14,10 @@ */ package software.amazon.kinesis.common; +import java.time.Duration; +import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; + import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -21,10 +25,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; -import java.time.Duration; -import java.util.concurrent.Future; -import java.util.concurrent.TimeoutException; - import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.eq; @@ -52,4 +52,4 @@ public class FutureUtilsTest { verify(future).cancel(eq(true)); } } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java new file mode 100644 index 00000000..ff004304 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java @@ -0,0 +1,13 
@@ +package software.amazon.kinesis.common; + +import org.junit.Test; + +import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; + +public class StreamConfigTest { + + @Test(expected = NullPointerException.class) + public void testNullStreamIdentifier() { + new StreamConfig(null, InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java new file mode 100644 index 00000000..d2779c3c --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java @@ -0,0 +1,156 @@ +package software.amazon.kinesis.common; + +import java.util.Arrays; +import java.util.Optional; + +import org.junit.Assert; +import org.junit.Test; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; + +import static org.junit.Assert.assertEquals; + +public class StreamIdentifierTest { + private static final String STREAM_NAME = "stream-name"; + private static final String PARTITION = "aws"; + private static final String SERVICE = "kinesis"; + private static final Region KINESIS_REGION = Region.US_WEST_1; + private static final String TEST_ACCOUNT_ID = "123456789012"; + private static final String RESOURCE = "stream/" + STREAM_NAME; + private static final long EPOCH = 1680616058L; + private static final Arn DEFAULT_ARN = createArn(); + + /** + * Test patterns that should match a serialization regex. + */ + @Test + public void testMultiStreamDeserializationSuccess() { + final StreamIdentifier siSerialized = StreamIdentifier.multiStreamInstance(serialize()); + assertEquals(Optional.of(EPOCH), siSerialized.streamCreationEpochOptional()); + assertActualStreamIdentifierExpected(null, siSerialized); + } + + /** + * Test patterns that should not match a serialization regex. + */ + @Test + public void testMultiStreamDeserializationFail() { + for (final String pattern : Arrays.asList( + ":stream-name:123", // missing account id + // "123456789:stream-name:123", // account id not 12 digits + "123456789abc:stream-name:123", // 12char alphanumeric account id + "123456789012::123", // missing stream name + "123456789012:stream-name", // missing delimiter and creation epoch + "123456789012:stream-name:", // missing creation epoch + "123456789012:stream-name:-123", // negative creation epoch + "123456789012:stream-name:abc", // non-numeric creation epoch + "", + "::" // missing account id, stream name, and epoch + )) { + try { + StreamIdentifier.multiStreamInstance(pattern); + Assert.fail("Serialization " + pattern + " should not have created a StreamIdentifier"); + } catch (final IllegalArgumentException iae) { + // expected; ignore + } + } + } + + /** + * Test ARNs that should not match a valid AWS Kinesis stream ARN. 
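+ * <p>For contrast, a well-formed stream ARN assembled from this test's constants is
+ * {@code arn:aws:kinesis:us-west-1:123456789012:stream/stream-name}.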
+ */ + @Test + public void testMultiStreamByArnWithInvalidStreamArnFail() { + for (final Arn invalidStreamArn : Arrays.asList( + createArn("abc", SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // invalid partition + createArn(PARTITION, "dynamodb", KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // incorrect service + createArn(PARTITION, SERVICE, null, TEST_ACCOUNT_ID, RESOURCE), // missing region + createArn(PARTITION, SERVICE, KINESIS_REGION, null, RESOURCE), // missing account id + createArn(PARTITION, SERVICE, KINESIS_REGION, "123456789", RESOURCE), // account id not 12 digits + createArn( + PARTITION, SERVICE, KINESIS_REGION, "123456789abc", RESOURCE), // 12char alphanumeric account id + createArn(PARTITION, SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, "table/name"), // incorrect resource type + Arn.fromString( + "arn:aws:dynamodb:us-east-2:123456789012:table/myDynamoDBTable") // valid ARN for incorrect + // resource + )) { + try { + StreamIdentifier.multiStreamInstance(invalidStreamArn, EPOCH); + Assert.fail("Arn " + invalidStreamArn + " should not have created a StreamIdentifier"); + } catch (final IllegalArgumentException iae) { + // expected; ignore + } + } + } + + @Test(expected = IllegalArgumentException.class) + public void testNegativeCreationEpoch() { + StreamIdentifier.multiStreamInstance(DEFAULT_ARN, -123); + } + + @Test(expected = IllegalArgumentException.class) + public void testZeroCreationEpoch() { + StreamIdentifier.multiStreamInstance(DEFAULT_ARN, 0); + } + + @Test + public void testSingleStreamInstanceFromArn() { + final StreamIdentifier actualStreamIdentifier = StreamIdentifier.singleStreamInstance(DEFAULT_ARN); + + assertActualStreamIdentifierExpected(DEFAULT_ARN, actualStreamIdentifier); + assertEquals(Optional.empty(), actualStreamIdentifier.streamCreationEpochOptional()); + assertEquals(actualStreamIdentifier.streamName(), actualStreamIdentifier.serialize()); + } + + @Test + public void testMultiStreamInstanceFromArn() { + final StreamIdentifier actualStreamIdentifier = StreamIdentifier.multiStreamInstance(DEFAULT_ARN, EPOCH); + + assertActualStreamIdentifierExpected(DEFAULT_ARN, actualStreamIdentifier); + assertEquals(Optional.of(EPOCH), actualStreamIdentifier.streamCreationEpochOptional()); + assertEquals(serialize(), actualStreamIdentifier.serialize()); + } + + @Test + public void testSingleStreamInstanceWithName() { + StreamIdentifier actualStreamIdentifier = StreamIdentifier.singleStreamInstance(STREAM_NAME); + assertEquals(Optional.empty(), actualStreamIdentifier.streamCreationEpochOptional()); + assertEquals(Optional.empty(), actualStreamIdentifier.accountIdOptional()); + assertEquals(Optional.empty(), actualStreamIdentifier.streamArnOptional()); + assertEquals(STREAM_NAME, actualStreamIdentifier.streamName()); + } + + @Test + public void testMultiStreamInstanceWithIdentifierSerialization() { + StreamIdentifier actualStreamIdentifier = StreamIdentifier.multiStreamInstance(serialize()); + assertActualStreamIdentifierExpected(null, actualStreamIdentifier); + assertEquals(Optional.of(EPOCH), actualStreamIdentifier.streamCreationEpochOptional()); + } + + private void assertActualStreamIdentifierExpected(Arn expectedArn, StreamIdentifier actual) { + assertEquals(STREAM_NAME, actual.streamName()); + assertEquals(Optional.of(TEST_ACCOUNT_ID), actual.accountIdOptional()); + assertEquals(Optional.ofNullable(expectedArn), actual.streamArnOptional()); + } + + /** + * Creates a pattern that matches {@link StreamIdentifier} serialization. 
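+ * <p>With this class's constants the result is {@code 123456789012:stream-name:1680616058},
+ * i.e. the multi-stream form {@code accountId:streamName:creationEpoch}.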
+ */ + private static String serialize() { + return String.join(":", TEST_ACCOUNT_ID, STREAM_NAME, Long.toString(EPOCH)); + } + + private static Arn createArn() { + return createArn(PARTITION, SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE); + } + + private static Arn createArn(String partition, String service, Region region, String account, String resource) { + return Arn.builder() + .partition(partition) + .service(service) + .region(region != null ? region.id() : null) + .accountId(account) + .resource(resource) + .build(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java new file mode 100644 index 00000000..0de2ae8e --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java @@ -0,0 +1,393 @@ +package software.amazon.kinesis.config; + +import java.io.IOException; +import java.net.Inet4Address; +import java.net.URISyntaxException; +import java.net.UnknownHostException; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import lombok.Builder; +import lombok.Value; +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClientBuilder; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClientBuilder; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse; +import software.amazon.awssdk.services.sts.StsAsyncClient; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.kinesis.application.TestRecordProcessorFactory; +import software.amazon.kinesis.common.ConfigsBuilder; +import software.amazon.kinesis.common.FutureUtils; +import software.amazon.kinesis.common.InitialPositionInStream; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.common.StreamConfig; +import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy; +import software.amazon.kinesis.processor.MultiStreamTracker; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; +import software.amazon.kinesis.processor.SingleStreamTracker; +import software.amazon.kinesis.retrieval.RetrievalConfig; +import software.amazon.kinesis.retrieval.fanout.FanOutConfig; +import software.amazon.kinesis.retrieval.polling.PollingConfig; +import software.amazon.kinesis.utils.RecordValidatorQueue; +import software.amazon.kinesis.utils.ReshardOptions; + +/** + * Default configuration for a producer or consumer used 
in integration tests. + * Producer: puts records of size 60 KB at an interval of 100 ms + * Consumer: streaming configuration (vs polling) that starts processing records at shard horizon + */ +@Slf4j +public abstract class KCLAppConfig { + public static final String AWS_ACCOUNT_PROFILE_PROPERTY = "awsProfile"; + public static final String CROSS_ACCOUNT_PROFILE_PROPERTY = "awsCrossAccountProfile"; + public static final String CROSS_ACCOUNT_CONSUMER_NAME = "cross-account-consumer"; + public static final String INTEGRATION_TEST_RESOURCE_PREFIX = "KCLIntegrationTest"; + + private String accountIdForConsumer = null; + private String accountIdForStreamOwner = null; + private List streamNames = null; + private KinesisAsyncClient kinesisAsyncClientForConsumer; + private StsAsyncClient stsAsyncClientForConsumer; + private KinesisAsyncClient kinesisAsyncClientForStreamOwner; + private StsAsyncClient stsAsyncClientForStreamOwner; + private DynamoDbAsyncClient dynamoDbAsyncClient; + private CloudWatchAsyncClient cloudWatchAsyncClient; + private RecordValidatorQueue recordValidator; + + /** + * List of Strings, either stream names or valid stream Arns, to be used in testing. For single stream mode, return + * a list of size 1. For multistream mode, return a list of size > 1. + */ + public abstract List getStreamArns(); + + public List getStreamNames() { + if (this.streamNames == null) { + return getStreamArns().stream() + .map(streamArn -> + streamArn.toString().substring(streamArn.toString().indexOf("/") + 1)) + .collect(Collectors.toList()); + } else { + return this.streamNames; + } + } + + public abstract String getTestName(); + + public String getApplicationName() { + return INTEGRATION_TEST_RESOURCE_PREFIX + getTestName(); + } + + public int getShardCount() { + return 4; + } + + public Region getRegion() { + return Region.US_WEST_2; + } + + /** + * Gets credentials for passed in profile with "-DawsProfile" which should match "~/.aws/config". Otherwise, + * uses default profile credentials chain. + */ + private AwsCredentialsProvider getCredentialsProvider() { + final String awsProfile = System.getProperty(AWS_ACCOUNT_PROFILE_PROPERTY); + return (awsProfile != null) + ? 
ProfileCredentialsProvider.builder().profileName(awsProfile).build() + : DefaultCredentialsProvider.create(); + } + + public boolean isCrossAccount() { + return false; + } + + public AwsCredentialsProvider getCrossAccountCredentialsProvider() { + return null; + } + + public InitialPositionInStream getInitialPosition() { + return InitialPositionInStream.TRIM_HORIZON; + } + + public abstract Protocol getKinesisClientProtocol(); + + public ProducerConfig getProducerConfig() { + return ProducerConfig.builder() + .isBatchPut(false) + .batchSize(1) + .recordSizeKB(60) + .callPeriodMills(100) + .build(); + } + + public List getReshardFactorList() { + return null; + } + + public String getAccountIdForConsumer() { + if (this.accountIdForConsumer == null) { + try { + this.accountIdForConsumer = FutureUtils.resolveOrCancelFuture( + buildStsAsyncClientForConsumer().getCallerIdentity(), Duration.ofSeconds(30)) + .account(); + } catch (Exception e) { + log.error("Error when getting account ID through STS for consumer", e); + } + } + return this.accountIdForConsumer; + } + + public String getAccountIdForStreamOwner() { + if (this.accountIdForStreamOwner == null) { + try { + this.accountIdForStreamOwner = FutureUtils.resolveOrCancelFuture( + buildStsAsyncClientForStreamOwner().getCallerIdentity(), Duration.ofSeconds(30)) + .account(); + } catch (Exception e) { + log.error("Error when getting account ID through STS for stream owner", e); + } + } + return this.accountIdForStreamOwner; + } + + public final KinesisAsyncClient buildAsyncKinesisClientForConsumer() throws URISyntaxException, IOException { + if (this.kinesisAsyncClientForConsumer == null) { + this.kinesisAsyncClientForConsumer = buildAsyncKinesisClient(getCredentialsProvider()); + } + return this.kinesisAsyncClientForConsumer; + } + + /** + * Builds the kinesis client for the account which owns the Kinesis stream. For cross account, this can be a + * different account than the account which gets records from the stream in the KCL. + * @return + * @throws URISyntaxException + * @throws IOException + */ + public final KinesisAsyncClient buildAsyncKinesisClientForStreamOwner() throws URISyntaxException, IOException { + if (this.kinesisAsyncClientForStreamOwner == null) { + final KinesisAsyncClient client; + if (isCrossAccount()) { + client = buildAsyncKinesisClient(getCrossAccountCredentialsProvider()); + } else { + client = buildAsyncKinesisClient(getCredentialsProvider()); + } + this.kinesisAsyncClientForStreamOwner = client; + } + return this.kinesisAsyncClientForStreamOwner; + } + + private KinesisAsyncClient buildAsyncKinesisClient(AwsCredentialsProvider creds) + throws URISyntaxException, IOException { + // Setup H2 client config.
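+ // The protocol below is supplied by each concrete test config; enhanced fan-out
+ // (SubscribeToShard) requires HTTP/2, while polling also works over HTTP/1.1.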
+ final NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder() + .maxConcurrency(Integer.MAX_VALUE) + .protocol(getKinesisClientProtocol()); + + final SdkAsyncHttpClient sdkAsyncHttpClient = + builder.buildWithDefaults(AttributeMap.builder().build()); + + // Set up the client builder with default values + final KinesisAsyncClientBuilder kinesisAsyncClientBuilder = + KinesisAsyncClient.builder().region(getRegion()); + kinesisAsyncClientBuilder.httpClient(sdkAsyncHttpClient); + kinesisAsyncClientBuilder.credentialsProvider(creds); + + return kinesisAsyncClientBuilder.build(); + } + + private StsAsyncClient buildStsAsyncClientForConsumer() { + if (this.stsAsyncClientForConsumer == null) { + this.stsAsyncClientForConsumer = StsAsyncClient.builder() + .credentialsProvider(getCredentialsProvider()) + .region(getRegion()) + .build(); + } + return this.stsAsyncClientForConsumer; + } + + private StsAsyncClient buildStsAsyncClientForStreamOwner() { + if (this.stsAsyncClientForStreamOwner == null) { + final StsAsyncClient client; + if (isCrossAccount()) { + client = buildStsAsyncClient(getCrossAccountCredentialsProvider()); + } else { + client = buildStsAsyncClient(getCredentialsProvider()); + } + this.stsAsyncClientForStreamOwner = client; + } + return this.stsAsyncClientForStreamOwner; + } + + private StsAsyncClient buildStsAsyncClient(AwsCredentialsProvider creds) { + return StsAsyncClient.builder() + .credentialsProvider(creds) + .region(getRegion()) + .build(); + } + + public final DynamoDbAsyncClient buildAsyncDynamoDbClient() throws IOException { + if (this.dynamoDbAsyncClient == null) { + final DynamoDbAsyncClientBuilder builder = + DynamoDbAsyncClient.builder().region(getRegion()); + builder.credentialsProvider(getCredentialsProvider()); + this.dynamoDbAsyncClient = builder.build(); + } + return this.dynamoDbAsyncClient; + } + + public final CloudWatchAsyncClient buildAsyncCloudWatchClient() throws IOException { + if (this.cloudWatchAsyncClient == null) { + final CloudWatchAsyncClientBuilder builder = + CloudWatchAsyncClient.builder().region(getRegion()); + builder.credentialsProvider(getCredentialsProvider()); + this.cloudWatchAsyncClient = builder.build(); + } + return this.cloudWatchAsyncClient; + } + + public final String getWorkerId() throws UnknownHostException { + return Inet4Address.getLocalHost().getHostName(); + } + + public final RecordValidatorQueue getRecordValidator() { + if (recordValidator == null) { + this.recordValidator = new RecordValidatorQueue(); + } + return this.recordValidator; + } + + public ShardRecordProcessorFactory getShardRecordProcessorFactory() { + return new TestRecordProcessorFactory(getRecordValidator()); + } + + public final ConfigsBuilder getConfigsBuilder(Map<Arn, Arn> streamToConsumerArnsMap) + throws IOException, URISyntaxException { + final String workerId = getWorkerId(); + if (getStreamArns().size() == 1) { + final SingleStreamTracker singleStreamTracker = new SingleStreamTracker( + StreamIdentifier.singleStreamInstance(getStreamArns().get(0)), + buildStreamConfigList(streamToConsumerArnsMap).get(0)); + return new ConfigsBuilder( + singleStreamTracker, + getApplicationName(), + buildAsyncKinesisClientForConsumer(), + buildAsyncDynamoDbClient(), + buildAsyncCloudWatchClient(), + workerId, + getShardRecordProcessorFactory()); + } else { + final MultiStreamTracker multiStreamTracker = new MultiStreamTracker() { + @Override + public List<StreamConfig> streamConfigList() { + return buildStreamConfigList(streamToConsumerArnsMap); + } +
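+ // NoLeaseDeletionStrategy leaves leases in place when a stream disappears from the + // tracker; presumably acceptable here, since short-lived canary runs clean up their + // own test resources afterwards.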
+ @Override + public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() { + return new FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy(); + } + }; + return new ConfigsBuilder( + multiStreamTracker, + getApplicationName(), + buildAsyncKinesisClientForConsumer(), + buildAsyncDynamoDbClient(), + buildAsyncCloudWatchClient(), + workerId, + getShardRecordProcessorFactory()); + } + } + + private List<StreamConfig> buildStreamConfigList(Map<Arn, Arn> streamToConsumerArnsMap) { + return getStreamArns().stream() + .map(streamArn -> { + final StreamIdentifier streamIdentifier; + if (getStreamArns().size() == 1) { + streamIdentifier = StreamIdentifier.singleStreamInstance(streamArn); + } else { // is multi-stream + streamIdentifier = StreamIdentifier.multiStreamInstance(streamArn, getCreationEpoch(streamArn)); + } + + if (streamToConsumerArnsMap != null) { + final StreamConfig streamConfig = new StreamConfig( + streamIdentifier, + InitialPositionInStreamExtended.newInitialPosition(getInitialPosition())); + return streamConfig.consumerArn( + streamToConsumerArnsMap.get(streamArn).toString()); + } else { + return new StreamConfig( + streamIdentifier, + InitialPositionInStreamExtended.newInitialPosition(getInitialPosition())); + } + }) + .collect(Collectors.toList()); + } + + private long getCreationEpoch(Arn streamArn) { + final DescribeStreamSummaryRequest request = DescribeStreamSummaryRequest.builder() + .streamARN(streamArn.toString()) + .build(); + + try { + final DescribeStreamSummaryResponse response = FutureUtils.resolveOrCancelFuture( + buildAsyncKinesisClientForStreamOwner().describeStreamSummary(request), Duration.ofSeconds(60)); + return response.streamDescriptionSummary().streamCreationTimestamp().toEpochMilli(); + } catch (Exception e) { + log.error("Exception when calling DescribeStreamSummary", e); + throw new RuntimeException("Unable to determine creation epoch for stream " + streamArn, e); + } + } + + public abstract RetrievalMode getRetrievalMode(); + + public RetrievalConfig getRetrievalConfig(ConfigsBuilder configsBuilder, Map<Arn, Arn> streamToConsumerArnsMap) { + final RetrievalConfig config = configsBuilder.retrievalConfig(); + if (getRetrievalMode() == RetrievalMode.POLLING) { + config.retrievalSpecificConfig(new PollingConfig(config.kinesisClient())); + } else { + if (getStreamArns().size() == 1) { + final Arn consumerArn = + streamToConsumerArnsMap.get(getStreamArns().get(0)); + config.retrievalSpecificConfig( + new FanOutConfig(config.kinesisClient()).consumerArn(consumerArn.toString())); + } + // For CAA multi-stream EFO, consumerArn is specified in StreamConfig + } + return config; + } + + public Arn buildStreamArn(String streamName) { + final String partition = getRegion().metadata().partition().id(); + return Arn.fromString( + String.join(":", "arn", partition, "kinesis", getRegion().id(), getAccountIdForStreamOwner(), "stream") + + "/" + INTEGRATION_TEST_RESOURCE_PREFIX + streamName); + } + + /** + * Configure ingress load (batch size, record size, and calling interval) + */ + @Value + @Builder + public static class ProducerConfig { + private boolean isBatchPut; + private int batchSize; + private int recordSizeKB; + private long callPeriodMills; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java new file mode 100644 index 00000000..a8440176 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java @@ -0,0 +1,39 @@ +package 
software.amazon.kinesis.config; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + +/** + * Config for a polling consumer with HTTP protocol of HTTP1 + */ +public class ReleaseCanaryPollingH1TestConfig extends KCLAppConfig { + + private final UUID uniqueId = UUID.randomUUID(); + + private final String applicationName = "PollingH1Test"; + private final String streamName = "2XPollingH1TestStream_" + uniqueId; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List getStreamArns() { + return Collections.singletonList(buildStreamArn(streamName)); + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP1_1; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.POLLING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java new file mode 100644 index 00000000..989f0fc6 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java @@ -0,0 +1,38 @@ +package software.amazon.kinesis.config; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + +/** + * Config for a polling consumer with HTTP protocol of HTTP2 + */ +public class ReleaseCanaryPollingH2TestConfig extends KCLAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final String applicationName = "PollingH2Test"; + private final String streamName = "2XPollingH2TestStream_" + uniqueId; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List getStreamArns() { + return Collections.singletonList(buildStreamArn(streamName)); + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.POLLING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java new file mode 100644 index 00000000..24a038de --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java @@ -0,0 +1,51 @@ +package software.amazon.kinesis.config; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.utils.ReshardOptions; + +import static software.amazon.kinesis.utils.ReshardOptions.MERGE; +import static software.amazon.kinesis.utils.ReshardOptions.SPLIT; + +public class ReleaseCanaryStreamingReshardingTestConfig extends KCLAppConfig { + + private final UUID uniqueId = UUID.randomUUID(); + + private final String applicationName = "StreamingReshardingTest"; + private final String streamName = "2XStreamingReshardingTestStream_" + uniqueId; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List getStreamArns() { + return 
Collections.singletonList(buildStreamArn(streamName)); + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.STREAMING; + } + + @Override + public int getShardCount() { + return 20; + } + + @Override + public List getReshardFactorList() { + return Arrays.asList(SPLIT, MERGE); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java new file mode 100644 index 00000000..c6a04ce2 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java @@ -0,0 +1,38 @@ +package software.amazon.kinesis.config; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + +/** + * Config for a streaming consumer with HTTP protocol of HTTP2 + */ +public class ReleaseCanaryStreamingTestConfig extends KCLAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final String applicationName = "StreamingTest"; + private final String streamName = "2XStreamingTestStream_" + uniqueId; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List getStreamArns() { + return Collections.singletonList(buildStreamArn(streamName)); + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.STREAMING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/RetrievalMode.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/RetrievalMode.java new file mode 100644 index 00000000..dcce39eb --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/RetrievalMode.java @@ -0,0 +1,6 @@ +package software.amazon.kinesis.config; + +public enum RetrievalMode { + POLLING, + STREAMING +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java new file mode 100644 index 00000000..ba334661 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java @@ -0,0 +1,26 @@ +package software.amazon.kinesis.config.crossaccount; + +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; +import software.amazon.kinesis.config.KCLAppConfig; + +/** + * Config class to configure cross account integration tests. + */ +public abstract class KCLCrossAccountAppConfig extends KCLAppConfig { + + @Override + public boolean isCrossAccount() { + return true; + } + + @Override + public AwsCredentialsProvider getCrossAccountCredentialsProvider() { + final String awsCrossAccountProfile = System.getProperty(KCLAppConfig.CROSS_ACCOUNT_PROFILE_PROPERTY); + return (awsCrossAccountProfile != null) + ? 
ProfileCredentialsProvider.builder() + .profileName(awsCrossAccountProfile) + .build() + : null; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java new file mode 100644 index 00000000..68f71799 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java @@ -0,0 +1,47 @@ +package software.amazon.kinesis.config.crossaccount; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.RetrievalMode; + +/** + * Config for a cross-account multi-stream polling consumer with HTTP protocol of HTTP2 + */ +@Slf4j +public class ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig extends KCLCrossAccountAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final int numStreams = 2; + private final String applicationName = "CrossAccountMultiStreamPollingH2Test"; + + private final String streamName = "2XCrossAccountPollingH2TestStream"; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List<Arn> getStreamArns() { + ArrayList<Arn> streamArns = new ArrayList<>(numStreams); + for (int i = 1; i <= numStreams; i++) { + streamArns.add(buildStreamArn(String.join("_", streamName, Integer.toString(i), uniqueId.toString()))); + } + return streamArns; + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.POLLING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java new file mode 100644 index 00000000..19e2aa3f --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java @@ -0,0 +1,47 @@ +package software.amazon.kinesis.config.crossaccount; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.RetrievalMode; + +/** + * Config for a cross-account multi-stream streaming consumer with HTTP protocol of HTTP2 + */ +@Slf4j +public class ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig extends KCLCrossAccountAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final int numStreams = 2; + private final String applicationName = "CrossAccountMultiStreamStreamingTest"; + + private final String streamName = "2XCrossAccountStreamingTestStream"; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List<Arn> getStreamArns() { + ArrayList<Arn> streamArns = new ArrayList<>(numStreams); + for (int i = 1; i <= numStreams; i++) { + streamArns.add(buildStreamArn(String.join("_", streamName, Integer.toString(i), uniqueId.toString()))); + } + return streamArns; + } + 
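+ // Stream names follow the pattern <base>_<index>_<uuid>: the index distinguishes the + // streams within one multi-stream run, and the random UUID keeps concurrent canary + // runs from colliding on stream names.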
+ @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.STREAMING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java new file mode 100644 index 00000000..7f44408e --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java @@ -0,0 +1,42 @@ +package software.amazon.kinesis.config.crossaccount; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.RetrievalMode; + +/** + * Config for a cross account polling consumer with HTTP protocol of HTTP2 + */ +@Slf4j +public class ReleaseCanaryCrossAccountPollingH2TestConfig extends KCLCrossAccountAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final String applicationName = "CrossAccountPollingH2Test"; + + private final String streamName = "2XCrossAccountPollingH2TestStream_" + uniqueId; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List getStreamArns() { + return Collections.singletonList(buildStreamArn(streamName)); + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.POLLING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java new file mode 100644 index 00000000..594347aa --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java @@ -0,0 +1,41 @@ +package software.amazon.kinesis.config.crossaccount; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.RetrievalMode; + +/** + * Config for a streaming consumer with HTTP protocol of HTTP2 + */ +@Slf4j +public class ReleaseCanaryCrossAccountStreamingTestConfig extends KCLCrossAccountAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final String applicationName = "CrossAccountStreamingTest"; + private final String streamName = "2XCrossAccountStreamingTestStream_" + uniqueId; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List getStreamArns() { + return Collections.singletonList(buildStreamArn(streamName)); + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.STREAMING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java new file mode 100644 index 00000000..ae49ec56 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java @@ -0,0 +1,45 @@ +package software.amazon.kinesis.config.multistream; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.RetrievalMode; + +/** + * Config for a multi-stream polling consumer with HTTP protocol of HTTP2 + */ +public class ReleaseCanaryMultiStreamPollingH2TestConfig extends KCLAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + + private final int numStreams = 2; + private final String applicationName = "MultiStreamPollingH2Test"; + private final String streamName = "2XMultiStreamPollingH2TestStream"; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List<Arn> getStreamArns() { + ArrayList<Arn> streamArns = new ArrayList<>(numStreams); + for (int i = 1; i <= numStreams; i++) { + streamArns.add(buildStreamArn(String.join("_", streamName, Integer.toString(i), uniqueId.toString()))); + } + return streamArns; + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.POLLING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamStreamingTestConfig.java new file mode 100644 index 00000000..d80a43d9 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamStreamingTestConfig.java @@ -0,0 +1,41 @@ +package software.amazon.kinesis.config.multistream; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.RetrievalMode; + +public class ReleaseCanaryMultiStreamStreamingTestConfig extends KCLAppConfig { + private final UUID uniqueId = UUID.randomUUID(); + private final int numStreams = 2; + private final String applicationName = "MultiStreamStreamingTest"; + private final String streamName = "2XMultiStreamStreamingTestStream"; + + @Override + public String getTestName() { + return applicationName; + } + + @Override + public List<Arn> getStreamArns() { + ArrayList<Arn> streamArns = new ArrayList<>(numStreams); + for (int i = 1; i <= numStreams; i++) { + streamArns.add(buildStreamArn(String.join("_", streamName, Integer.toString(i), uniqueId.toString()))); + } + return streamArns; + } + + @Override + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } + + @Override + public RetrievalMode getRetrievalMode() { + return RetrievalMode.STREAMING; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java index dff2a8cb..bbed04d3 100644 --- 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java @@ -24,6 +24,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; + import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -33,6 +34,7 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; @@ -63,10 +65,8 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { @Before public void setup() { numShardSyncWorkers = 1; - leaderDecider = new DeterministicShuffleShardSyncLeaderDecider(leaseRefresher, - scheduledExecutorService, - numShardSyncWorkers, - readWriteLock); + leaderDecider = new DeterministicShuffleShardSyncLeaderDecider( + leaseRefresher, scheduledExecutorService, numShardSyncWorkers, readWriteLock); when(readWriteLock.readLock()).thenReturn(mock(ReentrantReadWriteLock.ReadLock.class)); when(readWriteLock.writeLock()).thenReturn(mock(ReentrantReadWriteLock.WriteLock.class)); @@ -103,9 +103,9 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { } @Test - public void testElectedLeadersAsPerExpectedShufflingOrder() - throws Exception { - List leases = getLeases(5, false /*emptyLeaseOwner */,false /* duplicateLeaseOwner */, true /* activeLeases */); + public void testElectedLeadersAsPerExpectedShufflingOrder() throws Exception { + List leases = + getLeases(5, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); when(leaseRefresher.listLeases()).thenReturn(leases); Set expectedLeaders = getExpectedLeaders(leases); for (String leader : expectedLeaders) { @@ -121,11 +121,10 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { @Test public void testElectedLeadersAsPerExpectedShufflingOrderWhenUniqueWorkersLessThanMaxLeaders() { this.numShardSyncWorkers = 5; // More than number of unique lease owners - leaderDecider = new DeterministicShuffleShardSyncLeaderDecider(leaseRefresher, - scheduledExecutorService, - numShardSyncWorkers, - readWriteLock); - List leases = getLeases(3, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); + leaderDecider = new DeterministicShuffleShardSyncLeaderDecider( + leaseRefresher, scheduledExecutorService, numShardSyncWorkers, readWriteLock); + List leases = + getLeases(3, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); Set expectedLeaders = getExpectedLeaders(leases); // All lease owners should be present in expected leaders set, and they should all be leaders. 
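+ // (Here numShardSyncWorkers is 5 while only 3 unique owners exist, so the + // deterministic shuffle cannot exclude any owner.)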
for (Lease lease : leases) { @@ -134,7 +133,8 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { } } - private List getLeases(int count, boolean emptyLeaseOwner, boolean duplicateLeaseOwner, boolean activeLeases) { + private List getLeases( + int count, boolean emptyLeaseOwner, boolean duplicateLeaseOwner, boolean activeLeases) { List leases = new ArrayList<>(); for (int i = 0; i < count; i++) { Lease lease = new Lease(); @@ -150,8 +150,12 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { } private Set getExpectedLeaders(List leases) { - List uniqueHosts = leases.stream().filter(lease -> lease.leaseOwner() != null) - .map(Lease::leaseOwner).distinct().sorted().collect(Collectors.toList()); + List uniqueHosts = leases.stream() + .filter(lease -> lease.leaseOwner() != null) + .map(Lease::leaseOwner) + .distinct() + .sorted() + .collect(Collectors.toList()); Collections.shuffle(uniqueHosts, new Random(DETERMINISTIC_SHUFFLE_SEED)); int numWorkers = Math.min(uniqueHosts.size(), this.numShardSyncWorkers); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java index d6098cca..62751f01 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java @@ -15,6 +15,11 @@ package software.amazon.kinesis.coordinator; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; + import lombok.extern.slf4j.Slf4j; import org.junit.Before; import org.junit.Test; @@ -25,11 +30,6 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseBuilder; import software.amazon.kinesis.leases.LeaseCoordinator; -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.times; @@ -41,8 +41,10 @@ import static org.mockito.Mockito.when; public class DiagnosticEventsTest { @Mock private ThreadPoolExecutor executor; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private DiagnosticEventHandler defaultHandler; @@ -86,7 +88,7 @@ public class DiagnosticEventsTest { assertEquals(event.getLargestPoolSize(), largestPoolSize); assertEquals(event.getMaximumPoolSize(), maximumPoolSize); assertEquals(event.getLeasesOwned(), leaseAssignments.size()); - assertEquals(event.getCurrentQueueSize(),0); + assertEquals(0, event.getCurrentQueueSize()); verify(defaultHandler, times(1)).visit(event); } @@ -110,7 +112,7 @@ public class DiagnosticEventsTest { assertEquals(event.getExecutorStateEvent().getLargestPoolSize(), largestPoolSize); assertEquals(event.getExecutorStateEvent().getMaximumPoolSize(), maximumPoolSize); assertEquals(event.getExecutorStateEvent().getLeasesOwned(), leaseAssignments.size()); - assertEquals(event.getExecutorStateEvent().getCurrentQueueSize(),0); + assertEquals(0, event.getExecutorStateEvent().getCurrentQueueSize()); assertTrue(event.getThrowable() instanceof TestRejectedTaskException); verify(defaultHandler, times(1)).visit(event); @@ -136,21 +138,23 @@ public class DiagnosticEventsTest { 
assertEquals(executorStateEvent.getLargestPoolSize(), largestPoolSize); assertEquals(executorStateEvent.getMaximumPoolSize(), maximumPoolSize); assertEquals(executorStateEvent.getLeasesOwned(), leaseAssignments.size()); - assertEquals(executorStateEvent.getCurrentQueueSize(),0); + assertEquals(0, executorStateEvent.getCurrentQueueSize()); - RejectedTaskEvent rejectedTaskEvent = factory.rejectedTaskEvent(executorStateEvent, - new TestRejectedTaskException()); + RejectedTaskEvent rejectedTaskEvent = + factory.rejectedTaskEvent(executorStateEvent, new TestRejectedTaskException()); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getActiveThreads(), activeThreadCount); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getCoreThreads(), corePoolSize); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getLargestPoolSize(), largestPoolSize); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getMaximumPoolSize(), maximumPoolSize); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getLeasesOwned(), leaseAssignments.size()); - assertEquals(rejectedTaskEvent.getExecutorStateEvent().getCurrentQueueSize(),0); + assertEquals(0, rejectedTaskEvent.getExecutorStateEvent().getCurrentQueueSize()); assertTrue(rejectedTaskEvent.getThrowable() instanceof TestRejectedTaskException); } private class TestRejectedTaskException extends Exception { - private TestRejectedTaskException() { super(); } + private TestRejectedTaskException() { + super(); + } } private class CustomHandler implements DiagnosticEventHandler { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java index 84dcaa9c..20b22226 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java @@ -14,18 +14,6 @@ */ package software.amazon.kinesis.coordinator; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; @@ -39,17 +27,34 @@ import org.mockito.verification.VerificationMode; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.lifecycle.ShardConsumer; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class GracefulShutdownCoordinatorTest { @Mock private CountDownLatch shutdownCompleteLatch; + @Mock private CountDownLatch notificationCompleteLatch; + + @Mock + private CountDownLatch finalShutdownLatch; + @Mock private Scheduler 
scheduler; + @Mock private Callable contextCallable; + @Mock private ConcurrentMap shardInfoConsumerMap; @@ -59,6 +64,7 @@ public class GracefulShutdownCoordinatorTest { when(shutdownCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); + when(finalShutdownLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); assertThat(requestedShutdownCallable.call(), equalTo(true)); verify(shutdownCompleteLatch).await(anyLong(), any(TimeUnit.class)); @@ -74,6 +80,7 @@ public class GracefulShutdownCoordinatorTest { when(notificationCompleteLatch.getCount()).thenReturn(1L, 0L); mockLatchAwait(shutdownCompleteLatch, true); when(shutdownCompleteLatch.getCount()).thenReturn(1L, 1L, 0L); + when(finalShutdownLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); when(scheduler.shutdownComplete()).thenReturn(false, true); mockShardInfoConsumerMap(1, 0); @@ -95,6 +102,7 @@ public class GracefulShutdownCoordinatorTest { mockLatchAwait(notificationCompleteLatch, true); mockLatchAwait(shutdownCompleteLatch, false, true); when(shutdownCompleteLatch.getCount()).thenReturn(1L, 0L); + when(finalShutdownLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); when(scheduler.shutdownComplete()).thenReturn(false, true); mockShardInfoConsumerMap(1, 0); @@ -119,6 +127,8 @@ public class GracefulShutdownCoordinatorTest { mockLatchAwait(shutdownCompleteLatch, true); when(shutdownCompleteLatch.getCount()).thenReturn(2L, 2L, 1L, 1L, 0L); + when(finalShutdownLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); + when(scheduler.shutdownComplete()).thenReturn(false, false, false, true); mockShardInfoConsumerMap(2, 1, 0); @@ -226,9 +236,11 @@ public class GracefulShutdownCoordinatorTest { when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); doAnswer(invocation -> { - Thread.currentThread().interrupt(); - return true; - }).when(scheduler).shutdown(); + Thread.currentThread().interrupt(); + return true; + }) + .when(scheduler) + .shutdown(); assertThat(requestedShutdownCallable.call(), equalTo(false)); verifyLatchAwait(notificationCompleteLatch); @@ -282,12 +294,51 @@ public class GracefulShutdownCoordinatorTest { @Test(expected = IllegalStateException.class) public void testWorkerShutdownCallableThrows() throws Exception { - Callable requestedShutdownCallable = new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); + Callable requestedShutdownCallable = + new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); when(contextCallable.call()).thenThrow(new IllegalStateException("Bad Shutdown")); requestedShutdownCallable.call(); } + @Test + public void testShutdownFailsDueToRecordProcessors() throws Exception { + Callable requestedShutdownCallable = buildRequestedShutdownCallable(); + + when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); + when(shutdownCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(false); + when(shutdownCompleteLatch.getCount()).thenReturn(1L); + when(scheduler.shutdownComplete()).thenReturn(true); + mockShardInfoConsumerMap(1); + + assertThat(requestedShutdownCallable.call(), equalTo(false)); + verifyLatchAwait(shutdownCompleteLatch); + } + + @Test + public void testShutdownFailsDueToWorker() throws Exception { + Callable requestedShutdownCallable = buildRequestedShutdownCallable(); + + when(notificationCompleteLatch.await(anyLong(), 
any(TimeUnit.class))).thenReturn(true); + when(shutdownCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); + when(finalShutdownLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(false); + + assertThat(requestedShutdownCallable.call(), equalTo(false)); + verifyLatchAwait(finalShutdownLatch); + } + + /** + * tests that shutdown still succeeds in the case where there are no leases returned by the lease coordinator + */ + @Test + public void testShutdownSuccessWithNoLeases() throws Exception { + Callable requestedShutdownCallable = buildRequestedShutdownCallableWithNullLatches(); + when(finalShutdownLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); + + assertThat(requestedShutdownCallable.call(), equalTo(true)); + verifyLatchAwait(finalShutdownLatch); + } + private void verifyLatchAwait(CountDownLatch latch) throws Exception { verifyLatchAwait(latch, times(1)); } @@ -305,8 +356,24 @@ public class GracefulShutdownCoordinatorTest { } private Callable buildRequestedShutdownCallable() throws Exception { - GracefulShutdownContext context = new GracefulShutdownContext(shutdownCompleteLatch, - notificationCompleteLatch, scheduler); + GracefulShutdownContext context = GracefulShutdownContext.builder() + .shutdownCompleteLatch(shutdownCompleteLatch) + .notificationCompleteLatch(notificationCompleteLatch) + .finalShutdownLatch(finalShutdownLatch) + .scheduler(scheduler) + .build(); + when(contextCallable.call()).thenReturn(context); + return new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); + } + + /** + * finalShutdownLatch will always be initialized, but shutdownCompleteLatch and notificationCompleteLatch are not + * initialized in the case where there are no leases returned by the lease coordinator + */ + private Callable buildRequestedShutdownCallableWithNullLatches() throws Exception { + GracefulShutdownContext context = GracefulShutdownContext.builder() + .finalShutdownLatch(finalShutdownLatch) + .build(); when(contextCallable.call()).thenReturn(context); return new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); } @@ -320,5 +387,4 @@ public class GracefulShutdownCoordinatorTest { when(shardInfoConsumerMap.size()).thenReturn(initialItemCount, additionalItemCounts); when(shardInfoConsumerMap.isEmpty()).thenReturn(initialItemCount == 0, additionalEmptyStates); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java index a2047a6b..1e6be18f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java @@ -15,6 +15,16 @@ package software.amazon.kinesis.coordinator; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.junit.Assert; @@ -37,15 +47,6 @@ import software.amazon.kinesis.leases.ShardSyncTaskManager; import software.amazon.kinesis.metrics.NullMetricsFactory; import 
software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -55,259 +56,359 @@ import static software.amazon.kinesis.coordinator.PeriodicShardSyncManager.MIN_H import static software.amazon.kinesis.leases.LeaseManagementConfig.DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY; @RunWith(MockitoJUnitRunner.class) - public class PeriodicShardSyncManagerTest { + private static final int MAX_DEPTH_WITH_IN_PROGRESS_PARENTS = 1; + private StreamIdentifier streamIdentifier; private PeriodicShardSyncManager periodicShardSyncManager; + @Mock private LeaderDecider leaderDecider; + @Mock private LeaseRefresher leaseRefresher; + @Mock Map currentStreamConfigMap; + @Mock Function shardSyncTaskManagerProvider; + @Mock + Map streamToShardSyncTaskManagerMap; + @Before public void setup() { - streamIdentifier = StreamIdentifier.multiStreamInstance("123:stream:456"); - periodicShardSyncManager = new PeriodicShardSyncManager("worker", leaderDecider, leaseRefresher, currentStreamConfigMap, - shardSyncTaskManagerProvider, true, new NullMetricsFactory(), 2 * 60 * 1000, 3); + streamIdentifier = StreamIdentifier.multiStreamInstance("123456789012:stream:456"); + periodicShardSyncManager = new PeriodicShardSyncManager( + "worker", + leaderDecider, + leaseRefresher, + currentStreamConfigMap, + shardSyncTaskManagerProvider, + streamToShardSyncTaskManagerMap, + true, + new NullMetricsFactory(), + 2 * 60 * 1000, + 3, + new AtomicBoolean(true)); } @Test public void testForFailureWhenHashRangesAreIncomplete() { - List hashRanges = new ArrayList() {{ - add(deserialize("0", "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("25", MAX_HASH_KEY.toString())); // Missing interval here - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertTrue(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("0", "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("25", MAX_HASH_KEY.toString())); // Missing interval here + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertTrue(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testForSuccessWhenHashRangesAreComplete() { - List hashRanges = new ArrayList() {{ - add(deserialize("0", "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return 
lease; - }).collect(Collectors.toList()); - Assert.assertFalse(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("0", "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertFalse(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testForSuccessWhenUnSortedHashRangesAreComplete() { - List hashRanges = new ArrayList() {{ - add(deserialize("4", "23")); - add(deserialize("2", "3")); - add(deserialize("0", "1")); - add(deserialize("24", MAX_HASH_KEY.toString())); - add(deserialize("6", "23")); - - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertFalse(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("4", "23")); + add(deserialize("2", "3")); + add(deserialize("0", "1")); + add(deserialize("24", MAX_HASH_KEY.toString())); + add(deserialize("6", "23")); + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertFalse(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testForSuccessWhenHashRangesAreCompleteForOverlappingLeasesAtEnd() { - List hashRanges = new ArrayList() {{ - add(deserialize("0", "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - add(deserialize("24", "45")); - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertFalse(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("0", "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + add(deserialize("24", "45")); + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertFalse(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testIfShardSyncIsInitiatedWhenNoLeasesArePassed() { - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, null).shouldDoShardSync()); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, null) + 
.shouldDoShardSync()); } @Test public void testIfShardSyncIsInitiatedWhenEmptyLeasesArePassed() { - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, new ArrayList<>()).shouldDoShardSync()); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, new ArrayList<>()) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsNotInitiatedWhenConfidenceFactorIsNotReached() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); } @Test public void testIfShardSyncIsInitiatedWhenConfidenceFactorIsReached() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, 
multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsInitiatedWhenHoleIsDueToShardEnd() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); // introducing hole here through SHARD_END checkpoint - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - if(lease.hashKeyRangeForLease().startingHashKey().toString().equals("4")) { - lease.checkpoint(ExtendedSequenceNumber.SHARD_END); - } else { - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); // introducing hole here through SHARD_END checkpoint + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); } - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + if (lease.hashKeyRangeForLease() + .startingHashKey() + .toString() + .equals("4")) { + lease.checkpoint(ExtendedSequenceNumber.SHARD_END); + } else { + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + } + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsInitiatedWhenNoLeasesAreUsedDueToShardEnd() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.SHARD_END); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new 
MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.SHARD_END); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsNotInitiatedWhenHoleShifts() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - List multiStreamLeases2 = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); // Hole between 3 and 5 - add(deserialize("5", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + List multiStreamLeases2 = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); // Hole between 3 and 5 + add(deserialize("5", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Resetting the holes - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases2).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases2).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> 
Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases2) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases2) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsNotInitiatedWhenHoleShiftsMoreThanOnce() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - List multiStreamLeases2 = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); // Hole between 3 and 5 - add(deserialize("5", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + List multiStreamLeases2 = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); // Hole between 3 and 5 + add(deserialize("5", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Resetting the holes - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases2).shouldDoShardSync())); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases2) + .shouldDoShardSync())); // Resetting the holes - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - 
Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test @@ -317,43 +418,56 @@ public class PeriodicShardSyncManagerTest { when(shardSyncTaskManagerProvider.apply(any())).thenReturn(shardSyncTaskManager); when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); - final int[] shardCounter = { 0 }; - List hashKeyRangeForLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "20")); - add(deserialize("21", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}; + final int[] shardCounter = {0}; + List hashKeyRangeForLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "20")); + add(deserialize("21", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }; List kinesisShards = hashKeyRangeForLeases.stream() - .map(hashKeyRangeForLease -> Shard.builder().shardId("shard-" + (++shardCounter[0])).hashKeyRange( - HashKeyRange.builder().startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) - .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()).build()).build()) + .map(hashKeyRangeForLease -> Shard.builder() + .shardId("shard-" + (++shardCounter[0])) + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) + .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()) + .build()) + .build()) .collect(Collectors.toList()); when(shardDetector.listShards()).thenReturn(kinesisShards); - final int[] leaseCounter = { 0 }; - List multiStreamLeases = hashKeyRangeForLeases.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-"+(++leaseCounter[0]))); - lease.shardId("shard-"+(leaseCounter[0])); - // Setting the hashrange only for last two leases - if(leaseCounter[0] >= 3) { - lease.hashKeyRange(hashKeyRangeForLease); - } - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + final int[] leaseCounter = {0}; + List multiStreamLeases = hashKeyRangeForLeases.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.leaseKey( + MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-" + (++leaseCounter[0]))); + lease.shardId("shard-" + (leaseCounter[0])); + // Setting the hashrange only for last two leases + if (leaseCounter[0] >= 3) { + lease.hashKeyRange(hashKeyRangeForLease); + } + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Assert that shard sync should never trigger - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + IntStream.range(1, 
DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); // Assert that all the leases now have hash ranges set. - for(Lease lease : multiStreamLeases) { + for (Lease lease : multiStreamLeases) { Assert.assertNotNull(lease.hashKeyRangeForLease()); } } @@ -365,57 +479,73 @@ public class PeriodicShardSyncManagerTest { when(shardSyncTaskManagerProvider.apply(any())).thenReturn(shardSyncTaskManager); when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); - final int[] shardCounter = { 0 }; - List hashKeyRangeForLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("5", "20")); // Hole between 3 and 5 - add(deserialize("21", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}; + final int[] shardCounter = {0}; + List hashKeyRangeForLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("5", "20")); // Hole between 3 and 5 + add(deserialize("21", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }; List kinesisShards = hashKeyRangeForLeases.stream() - .map(hashKeyRangeForLease -> Shard.builder().shardId("shard-" + (++shardCounter[0])).hashKeyRange( - HashKeyRange.builder().startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) - .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()).build()).build()) + .map(hashKeyRangeForLease -> Shard.builder() + .shardId("shard-" + (++shardCounter[0])) + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) + .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()) + .build()) + .build()) .collect(Collectors.toList()); when(shardDetector.listShards()).thenReturn(kinesisShards); - final int[] leaseCounter = { 0 }; - List multiStreamLeases = hashKeyRangeForLeases.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-"+(++leaseCounter[0]))); - lease.shardId("shard-"+(leaseCounter[0])); - // Setting the hashrange only for last two leases - if(leaseCounter[0] >= 3) { - lease.hashKeyRange(hashKeyRangeForLease); - } - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + final int[] leaseCounter = {0}; + List multiStreamLeases = hashKeyRangeForLeases.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.leaseKey( + MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-" + (++leaseCounter[0]))); + lease.shardId("shard-" + (leaseCounter[0])); + // Setting the hashrange only for last two leases + if (leaseCounter[0] >= 3) { + lease.hashKeyRange(hashKeyRangeForLease); + } + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Assert that shard sync triggers only once the consecutive-holes threshold is breached - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - 
Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); // Assert that all the leases now have hash ranges set. - for(Lease lease : multiStreamLeases) { + for (Lease lease : multiStreamLeases) { Assert.assertNotNull(lease.hashKeyRangeForLease()); } } @Test public void testFor1000DifferentValidSplitHierarchyTreeTheHashRangesAreAlwaysComplete() { - for(int i=0; i < 1000; i++) { + for (int i = 0; i < 1000; i++) { int maxInitialLeaseCount = 100; List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 5, ReshardType.SPLIT, maxInitialLeaseCount, false); Collections.shuffle(leases); -// System.out.println( -// leases.stream().map(l -> l.checkpoint().sequenceNumber() + ":" + l.hashKeyRangeForLease()).collect(Collectors.toList())); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + // System.out.println( + // leases.stream().map(l -> l.checkpoint().sequenceNumber() + ":" + + // l.hashKeyRangeForLease()).collect(Collectors.toList())); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -426,7 +556,9 @@ public class PeriodicShardSyncManagerTest { List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 5, ReshardType.MERGE, maxInitialLeaseCount, false); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -437,7 +569,9 @@ public class PeriodicShardSyncManagerTest { List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 5, ReshardType.ANY, maxInitialLeaseCount, false); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -446,9 +580,11 @@ public class PeriodicShardSyncManagerTest { for (int i = 0; i < 1000; i++) { int maxInitialLeaseCount = 100; List leases = generateInitialLeases(maxInitialLeaseCount); - reshard(leases, 5, ReshardType.MERGE, maxInitialLeaseCount, true); + reshard(leases, MAX_DEPTH_WITH_IN_PROGRESS_PARENTS, ReshardType.MERGE, maxInitialLeaseCount, true); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -457,14 +593,14 @@ public class PeriodicShardSyncManagerTest { for (int i = 0; i < 1000; i++) { int maxInitialLeaseCount = 100; List leases = generateInitialLeases(maxInitialLeaseCount); - reshard(leases, 5, ReshardType.ANY, maxInitialLeaseCount, true); + reshard(leases, MAX_DEPTH_WITH_IN_PROGRESS_PARENTS, ReshardType.ANY, maxInitialLeaseCount, true); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + 
.hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } - - private List generateInitialLeases(int initialShardCount) { long hashRangeInternalMax = 10000000; List initialLeases = new ArrayList<>(); @@ -487,7 +623,11 @@ public class PeriodicShardSyncManagerTest { return initialLeases; } - private void reshard(List initialLeases, int depth, ReshardType reshardType, int leaseCounter, + private void reshard( + List initialLeases, + int depth, + ReshardType reshardType, + int leaseCounter, boolean shouldKeepSomeParentsInProgress) { for (int i = 0; i < depth; i++) { if (reshardType == ReshardType.SPLIT) { @@ -505,25 +645,29 @@ public class PeriodicShardSyncManagerTest { } private int merge(List initialLeases, int leaseCounter, boolean shouldKeepSomeParentsInProgress) { - List leasesEligibleForMerge = initialLeases.stream().filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) + List leasesEligibleForMerge = initialLeases.stream() + .filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) .collect(Collectors.toList()); -// System.out.println("Leases to merge : " + leasesEligibleForMerge); + // System.out.println("Leases to merge : " + leasesEligibleForMerge); int leasesToMerge = (int) ((leasesEligibleForMerge.size() - 1) / 2.0 * Math.random()); for (int i = 0; i < leasesToMerge; i += 2) { Lease parent1 = leasesEligibleForMerge.get(i); Lease parent2 = leasesEligibleForMerge.get(i + 1); - if(parent2.hashKeyRangeForLease().startingHashKey().subtract(parent1.hashKeyRangeForLease().endingHashKey()).equals(BigInteger.ONE)) - { + if (parent2.hashKeyRangeForLease() + .startingHashKey() + .subtract(parent1.hashKeyRangeForLease().endingHashKey()) + .equals(BigInteger.ONE)) { parent1.checkpoint(ExtendedSequenceNumber.SHARD_END); if (!shouldKeepSomeParentsInProgress || (shouldKeepSomeParentsInProgress && isOneFromDiceRoll())) { -// System.out.println("Deciding to keep parent in progress : " + parent2); + // System.out.println("Deciding to keep parent in progress : " + parent2); parent2.checkpoint(ExtendedSequenceNumber.SHARD_END); } Lease child = new Lease(); child.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); child.leaseKey("shard-" + (++leaseCounter)); -// System.out.println("Parent " + parent1 + " and " + parent2 + " merges into " + child); - child.hashKeyRange(new HashKeyRangeForLease(parent1.hashKeyRangeForLease().startingHashKey(), + // System.out.println("Parent " + parent1 + " and " + parent2 + " merges into " + child); + child.hashKeyRange(new HashKeyRangeForLease( + parent1.hashKeyRangeForLease().startingHashKey(), parent2.hashKeyRangeForLease().endingHashKey())); parent1.childShardIds(Collections.singletonList(child.leaseKey())); parent2.childShardIds(Collections.singletonList(child.leaseKey())); @@ -536,24 +680,31 @@ public class PeriodicShardSyncManagerTest { } private int split(List initialLeases, int leaseCounter) { - List leasesEligibleForSplit = initialLeases.stream().filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) + List leasesEligibleForSplit = initialLeases.stream() + .filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) .collect(Collectors.toList()); -// System.out.println("Leases to split : " + leasesEligibleForSplit); + // System.out.println("Leases to split : " + leasesEligibleForSplit); int leasesToSplit = (int) (leasesEligibleForSplit.size() * Math.random()); for (int i = 0; i < leasesToSplit; i++) { Lease parent = leasesEligibleForSplit.get(i); parent.checkpoint(ExtendedSequenceNumber.SHARD_END); Lease child1 = new Lease(); 
child1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - child1.hashKeyRange(new HashKeyRangeForLease(parent.hashKeyRangeForLease().startingHashKey(), - parent.hashKeyRangeForLease().startingHashKey().add(parent.hashKeyRangeForLease().endingHashKey()) + child1.hashKeyRange(new HashKeyRangeForLease( + parent.hashKeyRangeForLease().startingHashKey(), + parent.hashKeyRangeForLease() + .startingHashKey() + .add(parent.hashKeyRangeForLease().endingHashKey()) .divide(new BigInteger("2")))); child1.leaseKey("shard-" + (++leaseCounter)); Lease child2 = new Lease(); child2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); child2.hashKeyRange(new HashKeyRangeForLease( - parent.hashKeyRangeForLease().startingHashKey().add(parent.hashKeyRangeForLease().endingHashKey()) - .divide(new BigInteger("2")).add(new BigInteger("1")), + parent.hashKeyRangeForLease() + .startingHashKey() + .add(parent.hashKeyRangeForLease().endingHashKey()) + .divide(new BigInteger("2")) + .add(new BigInteger("1")), parent.hashKeyRangeForLease().endingHashKey())); child2.leaseKey("shard-" + (++leaseCounter)); @@ -561,7 +712,7 @@ public class PeriodicShardSyncManagerTest { child2.parentShardIds(Sets.newHashSet(parent.leaseKey())); parent.childShardIds(Lists.newArrayList(child1.leaseKey(), child2.leaseKey())); -// System.out.println("Parent " + parent + " splits into " + child1 + " and " + child2); + // System.out.println("Parent " + parent + " splits into " + child1 + " and " + child2); initialLeases.add(child1); initialLeases.add(child2); @@ -577,13 +728,9 @@ public class PeriodicShardSyncManagerTest { return Math.random() <= 0.16; } - private enum ReshardType { SPLIT, MERGE, ANY } - - - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java index aa9f8412..f5e81d4f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java @@ -15,30 +15,9 @@ package software.amazon.kinesis.coordinator; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static org.mockito.internal.verification.VerificationModeFactory.atMost; -import static software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.*; - import java.time.Duration; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; @@ -46,10 +25,13 @@ import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import 
java.util.concurrent.RejectedExecutionException; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import java.util.stream.Stream; import com.google.common.base.Joiner; import com.google.common.collect.Sets; @@ -60,13 +42,18 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.Spy; import org.mockito.runners.MockitoJUnitRunner; - +import org.mockito.stubbing.OngoingStubbing; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.awssdk.services.kinesis.KinesisServiceClientConfiguration; +import software.amazon.awssdk.utils.StringUtils; import software.amazon.kinesis.checkpoint.Checkpoint; import software.amazon.kinesis.checkpoint.CheckpointConfig; import software.amazon.kinesis.checkpoint.CheckpointFactory; @@ -76,8 +63,8 @@ import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.exceptions.KinesisClientLibException; import software.amazon.kinesis.exceptions.KinesisClientLibNonRetryableException; -import software.amazon.kinesis.leases.LeaseCleanupManager; import software.amazon.kinesis.leases.HierarchicalShardSyncer; +import software.amazon.kinesis.leases.LeaseCleanupManager; import software.amazon.kinesis.leases.LeaseCoordinator; import software.amazon.kinesis.leases.LeaseManagementConfig; import software.amazon.kinesis.leases.LeaseManagementFactory; @@ -98,19 +85,44 @@ import software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; -import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.MetricsConfig; +import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy; +import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.AutoDetectionAndDeferredDeletionStrategy; +import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy; +import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.ProvidedStreamsDeferredDeletionStrategy; import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.ProcessorConfig; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RetrievalConfig; import software.amazon.kinesis.retrieval.RetrievalFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import 
static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.internal.verification.VerificationModeFactory.atMost; + /** * */ @@ -121,9 +133,16 @@ public class SchedulerTest { private final String applicationName = "applicationName"; private final String streamName = "streamName"; private final String namespace = "testNamespace"; - private static final long MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS = 5 * 1000L; + private static final long MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS = 1000L; private static final long MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS = 30 * 1000L; private static final long LEASE_TABLE_CHECK_FREQUENCY_MILLIS = 3 * 1000L; + private static final Region TEST_REGION = Region.US_EAST_2; + private static final int ACCOUNT_ID_LENGTH = 12; + private static final long TEST_ACCOUNT = Long.parseLong(StringUtils.repeat("1", ACCOUNT_ID_LENGTH)); + private static final long TEST_EPOCH = 1234567890L; + private static final String TEST_SHARD_ID = "shardId-000000000001"; + private static final InitialPositionInStreamExtended TEST_INITIAL_POSITION = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); private Scheduler scheduler; private ShardRecordProcessorFactory shardRecordProcessorFactory; @@ -137,28 +156,40 @@ public class SchedulerTest { @Mock private KinesisAsyncClient kinesisClient; + @Mock private DynamoDbAsyncClient dynamoDBClient; + @Mock private CloudWatchAsyncClient cloudWatchClient; + @Mock private RetrievalFactory retrievalFactory; + @Mock private RecordsPublisher recordsPublisher; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private ShardSyncTaskManager shardSyncTaskManager; + @Mock private DynamoDBLeaseRefresher dynamoDBLeaseRefresher; + @Mock private ShardDetector shardDetector; + @Mock private Checkpointer checkpoint; + @Mock private WorkerStateChangeListener workerStateChangeListener; + @Spy private TestMultiStreamTracker multiStreamTracker; + @Mock private LeaseCleanupManager leaseCleanupManager; @@ -172,23 +203,38 @@ public class SchedulerTest { shardRecordProcessorFactory = new TestShardRecordProcessorFactory(); checkpointConfig = new CheckpointConfig().checkpointFactory(new TestKinesisCheckpointFactory()); - coordinatorConfig = new CoordinatorConfig(applicationName).parentShardPollIntervalMillis(100L).workerStateChangeListener(workerStateChangeListener); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, false)); + coordinatorConfig = new CoordinatorConfig(applicationName) + .parentShardPollIntervalMillis(100L) + .workerStateChangeListener(workerStateChangeListener); + leaseManagementConfig = new LeaseManagementConfig( + tableName, dynamoDBClient, kinesisClient, streamName, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, false)); 
lifecycleConfig = new LifecycleConfig(); metricsConfig = new MetricsConfig(cloudWatchClient, namespace); processorConfig = new ProcessorConfig(shardRecordProcessorFactory); - retrievalConfig = new RetrievalConfig(kinesisClient, streamName, applicationName) - .retrievalFactory(retrievalFactory); + retrievalConfig = + new RetrievalConfig(kinesisClient, streamName, applicationName).retrievalFactory(retrievalFactory); when(leaseCoordinator.leaseRefresher()).thenReturn(dynamoDBLeaseRefresher); when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); when(shardSyncTaskManager.hierarchicalShardSyncer()).thenReturn(new HierarchicalShardSyncer()); when(shardSyncTaskManager.callShardSyncTask()).thenReturn(new TaskResult(null)); - when(retrievalFactory.createGetRecordsCache(any(ShardInfo.class), any(StreamConfig.class), any(MetricsFactory.class))).thenReturn(recordsPublisher); + when(retrievalFactory.createGetRecordsCache( + any(ShardInfo.class), any(StreamConfig.class), any(MetricsFactory.class))) + .thenReturn(recordsPublisher); when(shardDetector.streamIdentifier()).thenReturn(mock(StreamIdentifier.class)); + when(kinesisClient.serviceClientConfiguration()) + .thenReturn(KinesisServiceClientConfiguration.builder() + .region(TEST_REGION) + .build()); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); } /** @@ -198,8 +244,14 @@ public class SchedulerTest { public void testGetStageName() { final String stageName = "testStageName"; coordinatorConfig = new CoordinatorConfig(stageName); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); assertEquals(stageName, scheduler.applicationName()); } @@ -208,17 +260,20 @@ public class SchedulerTest { final String shardId = "shardId-000000000000"; final String concurrencyToken = "concurrencyToken"; final ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer1 = scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer1 = + scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); assertNotNull(shardConsumer1); - final ShardConsumer shardConsumer2 = scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer2 = + scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); assertNotNull(shardConsumer2); assertSame(shardConsumer1, shardConsumer2); final String anotherConcurrencyToken = "anotherConcurrencyToken"; - final ShardInfo shardInfo2 = new ShardInfo(shardId, anotherConcurrencyToken, null, - ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer3 = scheduler.createOrGetShardConsumer(shardInfo2, shardRecordProcessorFactory, leaseCleanupManager); + final ShardInfo shardInfo2 = + new ShardInfo(shardId, anotherConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + final 
ShardConsumer shardConsumer3 = + scheduler.createOrGetShardConsumer(shardInfo2, shardRecordProcessorFactory, leaseCleanupManager); assertNotNull(shardConsumer3); assertNotSame(shardConsumer1, shardConsumer3); @@ -233,12 +288,12 @@ public class SchedulerTest { final ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("1000"); final ExtendedSequenceNumber finalSequenceNumber = new ExtendedSequenceNumber("2000"); - final List initialShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber)); - final List firstShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber)); - final List secondShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber)); + final List initialShardInfo = + Collections.singletonList(new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber)); + final List firstShardInfo = + Collections.singletonList(new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber)); + final List secondShardInfo = + Collections.singletonList(new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber)); final Checkpoint firstCheckpoint = new Checkpoint(firstSequenceNumber, null, null); @@ -250,9 +305,12 @@ public class SchedulerTest { schedulerSpy.runProcessLoop(); schedulerSpy.runProcessLoop(); - verify(schedulerSpy).buildConsumer(same(initialShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); - verify(schedulerSpy, never()).buildConsumer(same(firstShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); - verify(schedulerSpy, never()).buildConsumer(same(secondShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); + verify(schedulerSpy) + .buildConsumer(same(initialShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); + verify(schedulerSpy, never()) + .buildConsumer(same(firstShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); + verify(schedulerSpy, never()) + .buildConsumer(same(secondShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); verify(checkpoint).getCheckpointObject(eq(shardId)); } @@ -264,14 +322,16 @@ public class SchedulerTest { final String anotherConcurrencyToken = "anotherConcurrencyToken"; final ShardInfo shardInfo0 = new ShardInfo(shard0, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardInfo shardInfo0WithAnotherConcurrencyToken = new ShardInfo(shard0, anotherConcurrencyToken, null, - ExtendedSequenceNumber.TRIM_HORIZON); + final ShardInfo shardInfo0WithAnotherConcurrencyToken = + new ShardInfo(shard0, anotherConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); final ShardInfo shardInfo1 = new ShardInfo(shard1, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer0 = scheduler.createOrGetShardConsumer(shardInfo0, shardRecordProcessorFactory, leaseCleanupManager); - final ShardConsumer shardConsumer0WithAnotherConcurrencyToken = - scheduler.createOrGetShardConsumer(shardInfo0WithAnotherConcurrencyToken, shardRecordProcessorFactory, leaseCleanupManager); - final ShardConsumer shardConsumer1 = scheduler.createOrGetShardConsumer(shardInfo1, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer0 = + scheduler.createOrGetShardConsumer(shardInfo0, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer 
shardConsumer0WithAnotherConcurrencyToken = scheduler.createOrGetShardConsumer( + shardInfo0WithAnotherConcurrencyToken, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer1 = + scheduler.createOrGetShardConsumer(shardInfo1, shardRecordProcessorFactory, leaseCleanupManager); Set shards = new HashSet<>(); shards.add(shardInfo0); @@ -289,23 +349,38 @@ public class SchedulerTest { public final void testInitializationFailureWithRetries() throws Exception { doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenThrow(new RuntimeException()); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig( + tableName, dynamoDBClient, kinesisClient, streamName, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.run(); - verify(dynamoDBLeaseRefresher, times(coordinatorConfig.maxInitializationAttempts())).isLeaseTableEmpty(); + verify(dynamoDBLeaseRefresher, times(coordinatorConfig.maxInitializationAttempts())) + .isLeaseTableEmpty(); } @Test public final void testInitializationFailureWithRetriesWithConfiguredMaxInitializationAttempts() throws Exception { final int maxInitializationAttempts = 5; coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig( + tableName, dynamoDBClient, kinesisClient, streamName, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenThrow(new RuntimeException()); @@ -317,43 +392,54 @@ public class SchedulerTest { } @Test - public final void testMultiStreamInitialization() throws ProvisionedThroughputException, DependencyException { + public final void testMultiStreamInitialization() { retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, workerIdentifier) + .leaseManagementFactory(new 
TestKinesisLeaseManagementFactory(true, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.initialize(); - shardDetectorMap.values().stream() - .forEach(shardDetector -> verify(shardDetector, times(1)).listShards()); - shardSyncTaskManagerMap.values().stream() - .forEach(shardSyncTM -> verify(shardSyncTM, times(1)).hierarchicalShardSyncer()); + shardDetectorMap.values().forEach(shardDetector -> verify(shardDetector, times(1)) + .listShards()); + shardSyncTaskManagerMap.values().forEach(shardSyncTM -> verify(shardSyncTM, times(1)) + .hierarchicalShardSyncer()); } @Test public final void testMultiStreamInitializationWithFailures() { retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.initialize(); // Note: As of today we retry for all streams in the next attempt, so the retry count for each stream will vary. // At the least we expect 2 retries for each stream. Since there are 4 streams, we expect // the number of calls to be at most 5. 
- shardDetectorMap.values().stream() - .forEach(shardDetector -> verify(shardDetector, atLeast(2)).listShards()); - shardDetectorMap.values().stream() - .forEach(shardDetector -> verify(shardDetector, atMost(5)).listShards()); - shardSyncTaskManagerMap.values().stream() - .forEach(shardSyncTM -> verify(shardSyncTM, atLeast(2)).hierarchicalShardSyncer()); - shardSyncTaskManagerMap.values().stream() - .forEach(shardSyncTM -> verify(shardSyncTM, atMost(5)).hierarchicalShardSyncer()); + shardDetectorMap.values().forEach(shardDetector -> { + verify(shardDetector, atLeast(2)).listShards(); + verify(shardDetector, atMost(5)).listShards(); + }); + shardSyncTaskManagerMap.values().forEach(shardSyncTM -> { + verify(shardSyncTM, atLeast(2)).hierarchicalShardSyncer(); + verify(shardSyncTM, atMost(5)).hierarchicalShardSyncer(); + }); } - @Test public final void testMultiStreamConsumersAreBuiltOncePerAccountStreamShard() throws KinesisClientLibException { final String shardId = "shardId-000000000000"; @@ -363,14 +449,29 @@ public class SchedulerTest { final ExtendedSequenceNumber finalSequenceNumber = new ExtendedSequenceNumber("2000"); final List initialShardInfo = multiStreamTracker.streamConfigList().stream() - .map(sc -> new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber, - sc.streamIdentifier().serialize())).collect(Collectors.toList()); + .map(sc -> new ShardInfo( + shardId, + concurrencyToken, + null, + firstSequenceNumber, + sc.streamIdentifier().serialize())) + .collect(Collectors.toList()); final List firstShardInfo = multiStreamTracker.streamConfigList().stream() - .map(sc -> new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber, - sc.streamIdentifier().serialize())).collect(Collectors.toList()); + .map(sc -> new ShardInfo( + shardId, + concurrencyToken, + null, + secondSequenceNumber, + sc.streamIdentifier().serialize())) + .collect(Collectors.toList()); final List secondShardInfo = multiStreamTracker.streamConfigList().stream() - .map(sc -> new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber, - sc.streamIdentifier().serialize())).collect(Collectors.toList()); + .map(sc -> new ShardInfo( + shardId, + concurrencyToken, + null, + finalSequenceNumber, + sc.streamIdentifier().serialize())) + .collect(Collectors.toList()); final Checkpoint firstCheckpoint = new Checkpoint(firstSequenceNumber, null, null); @@ -378,151 +479,199 @@ public class SchedulerTest { when(checkpoint.getCheckpointObject(anyString())).thenReturn(firstCheckpoint); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); Scheduler schedulerSpy = spy(scheduler); schedulerSpy.runProcessLoop(); schedulerSpy.runProcessLoop(); schedulerSpy.runProcessLoop(); - initialShardInfo.stream().forEach( - shardInfo -> verify(schedulerSpy).buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), same(leaseCleanupManager))); - firstShardInfo.stream().forEach( - shardInfo -> verify(schedulerSpy, never()).buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); - secondShardInfo.stream().forEach( - shardInfo -> verify(schedulerSpy, never()).buildConsumer(same(shardInfo), 
eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); - + initialShardInfo.forEach(shardInfo -> verify(schedulerSpy) + .buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), same(leaseCleanupManager))); + firstShardInfo.forEach(shardInfo -> verify(schedulerSpy, never()) + .buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); + secondShardInfo.forEach(shardInfo -> verify(schedulerSpy, never()) + .buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); } @Test public final void testMultiStreamNoStreamsAreSyncedWhenStreamsAreNotRefreshed() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); Assert.assertTrue("SyncedStreams should be empty", syncedStreams.isEmpty()); - Assert.assertEquals(new HashSet(streamConfigList1), new HashSet(scheduler.currentStreamConfigMap().values())); + assertEquals( + new HashSet<>(streamConfigList1), + new HashSet<>(scheduler.currentStreamConfigMap().values())); } @Test public final void testMultiStreamOnlyNewStreamsAreSynced() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = 
IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(1, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(1, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); - Set expectedSyncedStreams = IntStream.range(5, 7).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(HashSet::new)); + Set expectedSyncedStreams = IntStream.range(5, 7) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .collect(Collectors.toCollection(HashSet::new)); Assert.assertEquals(expectedSyncedStreams, syncedStreams); - Assert.assertEquals(Sets.newHashSet(streamConfigList2), + Assert.assertEquals( + Sets.newHashSet(streamConfigList2), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); } @Test - public final void testMultiStreamSyncFromTableDefaultInitPos() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { + public final void testMultiStreamSyncFromTableDefaultInitPos() { // Streams in lease table but not tracked by multiStreamTracker - List leasesInTable = IntStream.range(1, 3).mapToObj(streamId -> new MultiStreamLease() - .streamIdentifier( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)) - .shardId("some_random_shard_id")) + List leasesInTable = IntStream.range(1, 3) + .mapToObj(streamId -> new MultiStreamLease() + .streamIdentifier(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)) + .shardId("some_random_shard_id")) .collect(Collectors.toCollection(LinkedList::new)); - // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this stream config later - leasesInTable.add(new MultiStreamLease().streamIdentifier("acc1:stream1:1").shardId("some_random_shard_id")); + // Include a stream 
that is already tracked by multiStreamTracker, just to make sure we will not touch this + // stream config later + leasesInTable.add(new MultiStreamLease() + .streamIdentifier("123456789012:stream1:1") + .shardId("some_random_shard_id")); // Expected StreamConfig after running syncStreamsFromLeaseTableOnAppInit // By default, Stream not present in multiStreamTracker will have initial position of LATEST - List expectedConfig = IntStream.range(1, 3).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List expectedConfig = IntStream.range(1, 3) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); // Include default configs expectedConfig.addAll(multiStreamTracker.streamConfigList()); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.syncStreamsFromLeaseTableOnAppInit(leasesInTable); - Map expectedConfigMap = expectedConfig.stream().collect(Collectors.toMap( - sc -> sc.streamIdentifier(), sc -> sc)); + Map expectedConfigMap = + expectedConfig.stream().collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity())); Assert.assertEquals(expectedConfigMap, scheduler.currentStreamConfigMap()); } @Test - public final void testMultiStreamSyncFromTableCustomInitPos() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { + public final void testMultiStreamSyncFromTableCustomInitPos() { Date testTimeStamp = new Date(); // Streams in lease table but not tracked by multiStreamTracker - List leasesInTable = IntStream.range(1, 3).mapToObj(streamId -> new MultiStreamLease() - .streamIdentifier( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)) - .shardId("some_random_shard_id")) + List leasesInTable = IntStream.range(1, 3) + .mapToObj(streamId -> new MultiStreamLease() + .streamIdentifier(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)) + .shardId("some_random_shard_id")) .collect(Collectors.toCollection(LinkedList::new)); - // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this stream config later - leasesInTable.add(new MultiStreamLease().streamIdentifier("acc1:stream1:1").shardId("some_random_shard_id")); + // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this + // stream config later + leasesInTable.add(new MultiStreamLease() + .streamIdentifier("123456789012:stream1:1") + .shardId("some_random_shard_id")); // Expected StreamConfig after running syncStreamsFromLeaseTableOnAppInit - // Stream not present in multiStreamTracker will have initial position specified by 
orphanedStreamInitialPositionInStream - List expectedConfig = IntStream.range(1, 3).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp))) + // Stream not present in multiStreamTracker will have initial position specified by + // orphanedStreamInitialPositionInStream + List expectedConfig = IntStream.range(1, 3) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp))) .collect(Collectors.toCollection(LinkedList::new)); // Include default configs expectedConfig.addAll(multiStreamTracker.streamConfigList()); // Mock a specific orphanedStreamInitialPositionInStream specified in multiStreamTracker - when(multiStreamTracker.orphanedStreamInitialPositionInStream()).thenReturn( - InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp)); + when(multiStreamTracker.orphanedStreamInitialPositionInStream()) + .thenReturn(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.syncStreamsFromLeaseTableOnAppInit(leasesInTable); - Map expectedConfigMap = expectedConfig.stream().collect(Collectors.toMap( - sc -> sc.streamIdentifier(), sc -> sc)); + Map expectedConfigMap = + expectedConfig.stream().collect(Collectors.toMap(sc -> sc.streamIdentifier(), sc -> sc)); Assert.assertEquals(expectedConfigMap, scheduler.currentStreamConfigMap()); } @Test public final void testMultiStreamStaleStreamsAreNotDeletedImmediatelyAutoDeletionStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamStaleStreamsAreNotDeletedImmediately(true, false); } @@ -536,93 +685,121 @@ public class SchedulerTest { @Test public final void testMultiStreamStaleStreamsAreNotDeletedImmediatelyProvidedListStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return null; - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return null; + } - @Override public Duration 
waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamStaleStreamsAreNotDeletedImmediately(false, false); } @Test public final void testMultiStreamStaleStreamsAreNotDeletedImmediatelyProvidedListStrategy2() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(ArrayList::new)); - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join( + streamId * TEST_ACCOUNT, + "multiStreamTest-" + streamId, + streamId * 12345))) + .collect(Collectors.toCollection(ArrayList::new)); + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamStaleStreamsAreNotDeletedImmediately(true, false); } - private final void testMultiStreamStaleStreamsAreNotDeletedImmediately(boolean expectPendingStreamsForDeletion, - boolean onlyStreamsDeletionNotLeases) + private void testMultiStreamStaleStreamsAreNotDeletedImmediately( + boolean expectPendingStreamsForDeletion, boolean onlyStreamsDeletionNotLeases) throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(3, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(3, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - - 
scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + mockListLeases(streamConfigList1); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); - Set expectedPendingStreams = IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(HashSet::new)); - Set expectedSyncedStreams = onlyStreamsDeletionNotLeases ? expectedPendingStreams : Sets.newHashSet(); + Set expectedPendingStreams = IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .collect(Collectors.toCollection(HashSet::new)); + Set expectedSyncedStreams = + onlyStreamsDeletionNotLeases ? expectedPendingStreams : Sets.newHashSet(); Assert.assertEquals(expectedSyncedStreams, syncedStreams); - Assert.assertEquals(Sets.newHashSet(onlyStreamsDeletionNotLeases ? streamConfigList2 : streamConfigList1), + Assert.assertEquals( + Sets.newHashSet(onlyStreamsDeletionNotLeases ? streamConfigList2 : streamConfigList1), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(expectPendingStreamsForDeletion ? expectedPendingStreams : Sets.newHashSet(), + Assert.assertEquals( + expectPendingStreamsForDeletion ? expectedPendingStreams : Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } @Test public final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriodWithAutoDetectionStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(true, null); } @Test public final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriodWithProvidedListStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return null; - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return null; + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); - HashSet currentStreamConfigMapOverride = IntStream.range(1, 5).mapToObj( - streamId -> new StreamConfig(StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), + @Override + public Duration waitPeriodToDeleteFormerStreams() { + 
return Duration.ZERO; + } + }); + HashSet currentStreamConfigMapOverride = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(HashSet::new)); testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(false, currentStreamConfigMapOverride); @@ -631,57 +808,82 @@ public class SchedulerTest { @Test public final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriodWithProvidedListStrategy2() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(ArrayList::new)); - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join( + streamId * TEST_ACCOUNT, + "multiStreamTest-" + streamId, + streamId * 12345))) + .collect(Collectors.toCollection(ArrayList::new)); + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(true, null); } - private final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(boolean expectSyncedStreams, Set currentStreamConfigMapOverride) + private void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod( + boolean expectSyncedStreams, Set currentStreamConfigMapOverride) throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(3, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(3, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + 
InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); + mockListLeases(streamConfigList1); + Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); - Set expectedSyncedStreams = IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(HashSet::new)); + Set expectedSyncedStreams = IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .collect(Collectors.toCollection(HashSet::new)); Assert.assertEquals(expectSyncedStreams ? expectedSyncedStreams : Sets.newHashSet(), syncedStreams); - Assert.assertEquals(currentStreamConfigMapOverride == null ? Sets.newHashSet(streamConfigList2) : currentStreamConfigMapOverride, + Assert.assertEquals( + currentStreamConfigMapOverride == null + ? Sets.newHashSet(streamConfigList2) + : currentStreamConfigMapOverride, Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(Sets.newHashSet(), - scheduler.staleStreamDeletionMap().keySet()); + Assert.assertEquals( + Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } @Test - public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithAutoDetectionStrategy() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + public final void + testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithAutoDetectionStrategy() + throws DependencyException, ProvisionedThroughputException, InvalidStateException { + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(true, false); } @@ -693,131 +895,240 @@ public class SchedulerTest { } @Test - public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return null; - } + public final void + 
testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy() + throws DependencyException, ProvisionedThroughputException, InvalidStateException { + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return null; + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(false, false); } @Test - public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy2() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return IntStream.range(1, 3) - .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))) - .collect(Collectors.toCollection(ArrayList::new)); - } + public final void + testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy2() + throws DependencyException, ProvisionedThroughputException, InvalidStateException { + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join( + streamId * TEST_ACCOUNT, + "multiStreamTest-" + streamId, + streamId * 12345))) + .collect(Collectors.toCollection(ArrayList::new)); + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(true, false); } - private final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(boolean expectPendingStreamsForDeletion, - boolean onlyStreamsNoLeasesDeletion) + private void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately( + boolean expectPendingStreamsForDeletion, boolean onlyStreamsNoLeasesDeletion) throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) - .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(3, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) - .collect(Collectors.toCollection(LinkedList::new)); + List streamConfigList1 = createDummyStreamConfigList(1, 5); + List streamConfigList2 = 
createDummyStreamConfigList(3, 7); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); + // Mock listLeases to exercise the delete path so scheduler doesn't remove stale streams due to not presenting + // in lease table + mockListLeases(streamConfigList1); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); Set expectedSyncedStreams; Set expectedPendingStreams = IntStream.range(1, 3) .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))) + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); - if(onlyStreamsNoLeasesDeletion) { + if (onlyStreamsNoLeasesDeletion) { expectedSyncedStreams = IntStream.concat(IntStream.range(1, 3), IntStream.range(5, 7)) - .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); } else { expectedSyncedStreams = IntStream.range(5, 7) - .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); } Assert.assertEquals(expectedSyncedStreams, syncedStreams); List expectedCurrentStreamConfigs; - if(onlyStreamsNoLeasesDeletion) { - expectedCurrentStreamConfigs = IntStream.range(3, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + if (onlyStreamsNoLeasesDeletion) { + expectedCurrentStreamConfigs = IntStream.range(3, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); } else { - expectedCurrentStreamConfigs = IntStream.range(1, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + expectedCurrentStreamConfigs = IntStream.range(1, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + 
.join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)),
+                        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)))
                 .collect(Collectors.toCollection(LinkedList::new));
         }
-        Assert.assertEquals(Sets.newHashSet(expectedCurrentStreamConfigs),
+        Assert.assertEquals(
+                Sets.newHashSet(expectedCurrentStreamConfigs),
                 Sets.newHashSet(scheduler.currentStreamConfigMap().values()));
-        Assert.assertEquals(expectPendingStreamsForDeletion ? expectedPendingStreams: Sets.newHashSet(),
+        Assert.assertEquals(
+                expectPendingStreamsForDeletion ? expectedPendingStreams : Sets.newHashSet(),
                 scheduler.staleStreamDeletionMap().keySet());
     }
+
+    @Test
+    public void testKinesisStaleDeletedStreamCleanup()
+            throws ProvisionedThroughputException, InvalidStateException, DependencyException {
+        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1, 6);
+        List<StreamConfig> streamConfigList2 = createDummyStreamConfigList(1, 4);
+
+        prepareForStaleDeletedStreamCleanupTests(streamConfigList1, streamConfigList2);
+
+        // When KCL starts, it begins by tracking all 5 streams
+        assertEquals(
+                Sets.newHashSet(streamConfigList1),
+                Sets.newHashSet(scheduler.currentStreamConfigMap().values()));
+        assertEquals(0, scheduler.staleStreamDeletionMap().size());
+        mockListLeases(streamConfigList1);
+
+        // 2 streams no longer need to be consumed
+        Set<StreamIdentifier> syncedStreams1 = scheduler.checkAndSyncStreamShardsAndLeases();
+        assertEquals(
+                Sets.newHashSet(streamConfigList1),
+                Sets.newHashSet(scheduler.currentStreamConfigMap().values()));
+        assertEquals(
+                createDummyStreamConfigList(4, 6).stream()
+                        .map(StreamConfig::streamIdentifier)
+                        .collect(Collectors.toSet()),
+                scheduler.staleStreamDeletionMap().keySet());
+        assertEquals(0, syncedStreams1.size());
+
+        StreamConfig deletedStreamConfig = createDummyStreamConfig(5);
+        // One stream is deleted on the Kinesis side
+        scheduler.deletedStreamListProvider().add(deletedStreamConfig.streamIdentifier());
+
+        Set<StreamIdentifier> syncedStreams2 = scheduler.checkAndSyncStreamShardsAndLeases();
+
+        Set<StreamConfig> expectedCurrentStreamConfigs = Sets.newHashSet(streamConfigList1);
+        expectedCurrentStreamConfigs.remove(deletedStreamConfig);
+
+        // Assert that the stream deleted from Kinesis is cleaned up from KCL's in-memory state.
+        assertEquals(
+                expectedCurrentStreamConfigs,
+                Sets.newHashSet(scheduler.currentStreamConfigMap().values()));
+        assertEquals(
+                Sets.newHashSet(createDummyStreamConfig(4).streamIdentifier()),
+                Sets.newHashSet(scheduler.staleStreamDeletionMap().keySet()));
+        assertEquals(1, syncedStreams2.size());
+        assertEquals(
+                0, scheduler.deletedStreamListProvider().purgeAllDeletedStream().size());
+
+        verify(multiStreamTracker, times(3)).streamConfigList();
+    }
+
+    // Validates that no cleanup is performed for a stream that is still tracked in multiStreamTracker
+    @Test
+    public void testKinesisStaleDeletedStreamNoCleanUpForTrackedStream()
+            throws ProvisionedThroughputException, InvalidStateException, DependencyException {
+        List<StreamConfig> streamConfigList1 = createDummyStreamConfigList(1, 6);
+        prepareForStaleDeletedStreamCleanupTests(streamConfigList1);
+
+        scheduler.deletedStreamListProvider().add(createDummyStreamConfig(3).streamIdentifier());
+
+        Set<StreamIdentifier> syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases();
+
+        assertEquals(0, syncedStreams.size());
+        assertEquals(0, scheduler.staleStreamDeletionMap().size());
+        assertEquals(
+                Sets.newHashSet(streamConfigList1),
+                Sets.newHashSet(scheduler.currentStreamConfigMap().values()));
+    }
+
+    // Creates a list of (upperBound - lowerBound) dummy StreamConfigs
+    private List<StreamConfig> createDummyStreamConfigList(int lowerBound, int upperBound) {
+        return IntStream.range(lowerBound, upperBound)
+                .mapToObj(this::createDummyStreamConfig)
+                .collect(Collectors.toCollection(LinkedList::new));
+    }
+
+    private StreamConfig createDummyStreamConfig(int streamId) {
+        return new StreamConfig(
+                StreamIdentifier.multiStreamInstance(
+                        Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)),
+                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
+    }
+
     @Test
     public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreDeletedAfterDefermentPeriod()
             throws DependencyException, ProvisionedThroughputException, InvalidStateException {
-        List<StreamConfig> streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig(
-                StreamIdentifier.multiStreamInstance(
-                        Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)),
-                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)))
+        List<StreamConfig> streamConfigList1 = IntStream.range(1, 5)
+                .mapToObj(streamId -> new StreamConfig(
+                        StreamIdentifier.multiStreamInstance(Joiner.on(":")
+                                .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)),
+                        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)))
                 .collect(Collectors.toCollection(LinkedList::new));
-        List<StreamConfig> streamConfigList2 = IntStream.range(3, 7).mapToObj(streamId -> new StreamConfig(
-                StreamIdentifier.multiStreamInstance(
-                        Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345)),
-                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)))
+        List<StreamConfig> streamConfigList2 = IntStream.range(3, 7)
+                .mapToObj(streamId -> new StreamConfig(
+                        StreamIdentifier.multiStreamInstance(Joiner.on(":")
+                                .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)),
+                        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)))
                 .collect(Collectors.toCollection(LinkedList::new));
         retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName)
                 .retrievalFactory(retrievalFactory);
when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); Set expectedSyncedStreams = IntStream.concat(IntStream.range(1, 3), IntStream.range(5, 7)) .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * 111111111, "multiStreamTest-" + streamId, streamId * 12345))) + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); Assert.assertEquals(expectedSyncedStreams, syncedStreams); - Assert.assertEquals(Sets.newHashSet(streamConfigList2), + Assert.assertEquals( + Sets.newHashSet(streamConfigList2), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(Sets.newHashSet(), - scheduler.staleStreamDeletionMap().keySet()); + Assert.assertEquals( + Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } @Test @@ -825,8 +1136,14 @@ public class SchedulerTest { final int maxInitializationAttempts = 1; coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); coordinatorConfig.skipShardSyncAtWorkerInitializationIfLeasesExist(false); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true); @@ -836,7 +1153,8 @@ public class SchedulerTest { long endTime = System.currentTimeMillis(); assertTrue(endTime - startTime > MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS); - assertTrue(endTime - startTime < (MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS + LEASE_TABLE_CHECK_FREQUENCY_MILLIS)); + assertTrue(endTime - startTime + < (MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS + LEASE_TABLE_CHECK_FREQUENCY_MILLIS)); } @Test @@ -844,8 +1162,14 @@ public class SchedulerTest { final int maxInitializationAttempts = 1; coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); coordinatorConfig.skipShardSyncAtWorkerInitializationIfLeasesExist(false); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); doNothing().when(leaseCoordinator).initialize(); 
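A pattern recurs throughout the multi-stream tests in this patch: the Scheduler under test is wrapped in a Mockito spy, so checkAndSyncStreamShardsAndLeases() runs the real sync logic while individual methods are stubbed to force the branch under test. A minimal sketch of that pattern, assuming the test's config fixtures; the local variable names here are illustrative and checked exceptions are elided:

    // Wrap the real Scheduler so unstubbed calls execute real code paths.
    Scheduler scheduler = spy(new Scheduler(
            checkpointConfig, coordinatorConfig, leaseManagementConfig,
            lifecycleConfig, metricsConfig, processorConfig, retrievalConfig));
    // Stub the time-based gate so the sync branch runs on this invocation.
    when(scheduler.shouldSyncStreamsNow()).thenReturn(true);
    // The real method executes and reports which streams were synced.
    Set<StreamIdentifier> synced = scheduler.checkAndSyncStreamShardsAndLeases();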
when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(false); @@ -860,9 +1184,11 @@ public class SchedulerTest { @Test public final void testSchedulerShutdown() { scheduler.shutdown(); - verify(workerStateChangeListener, times(1)).onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN_STARTED); + verify(workerStateChangeListener, times(1)) + .onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN_STARTED); verify(leaseCoordinator, times(1)).stop(); - verify(workerStateChangeListener, times(1)).onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN); + verify(workerStateChangeListener, times(1)) + .onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN); } @Test @@ -874,8 +1200,15 @@ public class SchedulerTest { when(eventFactory.rejectedTaskEvent(any(), any())).thenReturn(rejectedTaskEvent); when(eventFactory.executorStateEvent(any(), any())).thenReturn(executorStateEvent); - Scheduler testScheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, - lifecycleConfig, metricsConfig, processorConfig, retrievalConfig, eventFactory); + Scheduler testScheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig, + eventFactory); Scheduler schedulerSpy = spy(testScheduler); @@ -884,9 +1217,11 @@ public class SchedulerTest { .doCallRealMethod() .doAnswer(invocation -> { // trigger rejected task in RxJava layer - RxJavaPlugins.onError(new RejectedExecutionException("Test exception.")); - return null; - }).when(schedulerSpy).runProcessLoop(); + RxJavaPlugins.onError(new RejectedExecutionException("Test exception.")); + return null; + }) + .when(schedulerSpy) + .runProcessLoop(); // Scheduler sets error handler in initialize method schedulerSpy.initialize(); @@ -898,6 +1233,249 @@ public class SchedulerTest { verify(rejectedTaskEvent, times(1)).accept(any()); } + @Test + public void testUpdateStreamMapIfMissingLatestStream() throws Exception { + prepareMultiStreamScheduler(createDummyStreamConfigList(1, 6)); + scheduler.checkAndSyncStreamShardsAndLeases(); + verify(scheduler).syncStreamsFromLeaseTableOnAppInit(any()); + } + + @Test + public void testSyncLeaseAsThisIsInitialAppBootstrapEvenThoughStreamMapContainsAllStreams() { + final List streamConfigList = createDummyStreamConfigList(1, 6); + when(multiStreamTracker.streamConfigList()).thenReturn(Collections.emptyList()); + prepareMultiStreamScheduler(streamConfigList); + // Populate currentStreamConfigMap to simulate that the leader has the latest streams. + multiStreamTracker + .streamConfigList() + .forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s)); + scheduler.runProcessLoop(); + verify(scheduler).syncStreamsFromLeaseTableOnAppInit(any()); + assertTrue(scheduler.currentStreamConfigMap().size() != 0); + } + + @Test + public void testNotRefreshForNewStreamAfterLeaderFlippedTheShouldInitialize() { + prepareMultiStreamScheduler(createDummyStreamConfigList(1, 6)); + // flip the shouldInitialize flag + scheduler.runProcessLoop(); + verify(scheduler, times(1)).syncStreamsFromLeaseTableOnAppInit(any()); + + final List streamConfigList = createDummyStreamConfigList(1, 6); + when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList); + scheduler.runProcessLoop(); + + // Since the sync path has been executed once before the DDB sync flags should be flipped + // to prevent doing DDB lookups in the subsequent runs. 
+        verify(scheduler, times(1)).syncStreamsFromLeaseTableOnAppInit(any());
+        assertEquals(
+                0,
+                streamConfigList.stream()
+                        .filter(s -> !scheduler.currentStreamConfigMap().containsKey(s.streamIdentifier()))
+                        .count());
+    }
+
+    @Test
+    public void testDropStreamsFromMapsWhenStreamIsNotInLeaseTableAndNewStreamConfigMap() throws Exception {
+        when(multiStreamTracker.streamConfigList()).thenReturn(Collections.emptyList());
+        prepareMultiStreamScheduler();
+        final List<StreamConfig> streamConfigList = createDummyStreamConfigList(1, 6);
+        streamConfigList.forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s));
+        scheduler.checkAndSyncStreamShardsAndLeases();
+        assertEquals(Collections.emptySet(), scheduler.currentStreamConfigMap().keySet());
+    }
+
+    @Test
+    public void testNotDropStreamsFromMapsWhenStreamIsInLeaseTable() throws Exception {
+        when(multiStreamTracker.streamConfigList()).thenReturn(Collections.emptyList());
+        prepareForStaleDeletedStreamCleanupTests();
+        final List<StreamConfig> streamConfigList = createDummyStreamConfigList(1, 6);
+        mockListLeases(streamConfigList);
+        streamConfigList.forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s));
+        final Set<StreamIdentifier> initialSet =
+                new HashSet<>(scheduler.currentStreamConfigMap().keySet());
+        scheduler.checkAndSyncStreamShardsAndLeases();
+        assertEquals(initialSet, scheduler.currentStreamConfigMap().keySet());
+        assertEquals(
+                streamConfigList.size(),
+                scheduler.currentStreamConfigMap().keySet().size());
+    }
+
+    @Test
+    public void testNotDropStreamsFromMapsWhenStreamIsInNewStreamConfigMap() throws Exception {
+        final List<StreamConfig> streamConfigList = createDummyStreamConfigList(1, 6);
+        when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList);
+        prepareMultiStreamScheduler();
+        streamConfigList.forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s));
+        final Set<StreamIdentifier> initialSet =
+                new HashSet<>(scheduler.currentStreamConfigMap().keySet());
+        scheduler.checkAndSyncStreamShardsAndLeases();
+        assertEquals(initialSet, scheduler.currentStreamConfigMap().keySet());
+        assertEquals(
+                streamConfigList.size(),
+                scheduler.currentStreamConfigMap().keySet().size());
+    }
+
+    @SafeVarargs
+    private final void prepareMultiStreamScheduler(List<StreamConfig>... streamConfigs) {
+        retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName)
+                .retrievalFactory(retrievalFactory);
+        scheduler = spy(new Scheduler(
+                checkpointConfig,
+                coordinatorConfig,
+                leaseManagementConfig,
+                lifecycleConfig,
+                metricsConfig,
+                processorConfig,
+                retrievalConfig));
+        stubMultiStreamTracker(streamConfigs);
+        when(scheduler.shouldSyncStreamsNow()).thenReturn(true);
+    }
+
+    @SafeVarargs
+    private final void prepareForStaleDeletedStreamCleanupTests(List<StreamConfig>... streamConfigs) {
+        when(multiStreamTracker.formerStreamsLeasesDeletionStrategy())
+                .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() {
+                    @Override
+                    public Duration waitPeriodToDeleteFormerStreams() {
+                        return Duration.ofDays(1);
+                    }
+                });
+        stubMultiStreamTracker(streamConfigs);
+        prepareMultiStreamScheduler();
+    }
+
+    @SafeVarargs
+    private final void stubMultiStreamTracker(List<StreamConfig>...
streamConfigs) { + if (streamConfigs.length > 0) { + OngoingStubbing> stub = when(multiStreamTracker.streamConfigList()); + for (List streamConfig : streamConfigs) { + stub = stub.thenReturn(streamConfig); + } + } + } + + private void mockListLeases(List configs) + throws ProvisionedThroughputException, InvalidStateException, DependencyException { + when(dynamoDBLeaseRefresher.listLeases()) + .thenReturn(configs.stream() + .map(s -> new MultiStreamLease() + .streamIdentifier(s.streamIdentifier().toString()) + .shardId("some_random_shard_id")) + .collect(Collectors.toList())); + } + + @Test + public void testStreamConfigsArePopulatedWithStreamArnsInMultiStreamMode() { + final String streamArnStr = constructStreamArnStr(TEST_REGION, 111122223333L, "some-stream-name"); + when(multiStreamTracker.streamConfigList()) + .thenReturn(Stream.of( + // Each of scheduler's currentStreamConfigMap entries should have a streamARN in + // multi-stream mode, regardless of whether the streamTracker-provided streamIdentifiers + // were created using serialization or stream ARN. + StreamIdentifier.multiStreamInstance( + constructStreamIdentifierSer(TEST_ACCOUNT, streamName)), + StreamIdentifier.multiStreamInstance(Arn.fromString(streamArnStr), TEST_EPOCH)) + .map(streamIdentifier -> new StreamConfig(streamIdentifier, TEST_INITIAL_POSITION)) + .collect(Collectors.toList())); + retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) + .retrievalFactory(retrievalFactory); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); + + final Set expectedStreamArns = + Sets.newHashSet(constructStreamArnStr(TEST_REGION, TEST_ACCOUNT, streamName), streamArnStr); + + final Set actualStreamArns = scheduler.currentStreamConfigMap().values().stream() + .map(sc -> sc.streamIdentifier() + .streamArnOptional() + .orElseThrow(IllegalStateException::new) + .toString()) + .collect(Collectors.toSet()); + + assertEquals(expectedStreamArns, actualStreamArns); + } + + @Test + public void testOrphanStreamConfigIsPopulatedWithArn() { + final String streamIdentifierSerializationForOrphan = constructStreamIdentifierSer(TEST_ACCOUNT, streamName); + assertFalse(multiStreamTracker.streamConfigList().stream() + .map(sc -> sc.streamIdentifier().serialize()) + .collect(Collectors.toSet()) + .contains(streamIdentifierSerializationForOrphan)); + + when(leaseCoordinator.getCurrentAssignments()) + .thenReturn(Collections.singletonList( + new ShardInfo(TEST_SHARD_ID, null, null, null, streamIdentifierSerializationForOrphan))); + retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) + .retrievalFactory(retrievalFactory); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); + + scheduler.runProcessLoop(); + + verify(multiStreamTracker) + .createStreamConfig(StreamIdentifier.multiStreamInstance(streamIdentifierSerializationForOrphan)); + + final ArgumentCaptor streamConfigArgumentCaptor = ArgumentCaptor.forClass(StreamConfig.class); + verify(retrievalFactory).createGetRecordsCache(any(), streamConfigArgumentCaptor.capture(), any()); + + final StreamConfig actualStreamConfigForOrphan = streamConfigArgumentCaptor.getValue(); + final Optional streamArnForOrphan = + actualStreamConfigForOrphan.streamIdentifier().streamArnOptional(); + 
assertTrue(streamArnForOrphan.isPresent()); + assertEquals( + constructStreamArnStr(TEST_REGION, TEST_ACCOUNT, streamName), + streamArnForOrphan.get().toString()); + } + + @Test + public void testMismatchingArnRegionAndKinesisClientRegionThrowsException() { + final Region streamArnRegion = Region.US_WEST_1; + Assert.assertNotEquals( + streamArnRegion, kinesisClient.serviceClientConfiguration().region()); + + when(multiStreamTracker.streamConfigList()) + .thenReturn(Collections.singletonList(new StreamConfig( + StreamIdentifier.multiStreamInstance( + Arn.fromString(constructStreamArnStr(streamArnRegion, TEST_ACCOUNT, streamName)), + TEST_EPOCH), + TEST_INITIAL_POSITION))); + retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) + .retrievalFactory(retrievalFactory); + + assertThrows( + IllegalArgumentException.class, + () -> new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); + } + + private static String constructStreamIdentifierSer(long accountId, String streamName) { + return String.join(":", String.valueOf(accountId), streamName, String.valueOf(TEST_EPOCH)); + } + + private static String constructStreamArnStr(Region region, long accountId, String streamName) { + return "arn:aws:kinesis:" + region + ":" + accountId + ":stream/" + streamName; + } + /*private void runAndTestWorker(int numShards, int threadPoolSize) throws Exception { final int numberOfRecordsPerShard = 10; final String kinesisShardPrefix = "kinesis-0-"; @@ -1071,9 +1649,7 @@ public class SchedulerTest { } @Override - public void leaseLost(LeaseLostInput leaseLostInput) { - - } + public void leaseLost(LeaseLostInput leaseLostInput) {} @Override public void shardEnded(ShardEndedInput shardEndedInput) { @@ -1085,9 +1661,7 @@ public class SchedulerTest { } @Override - public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { - - } + public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) {} }; } @@ -1095,7 +1669,6 @@ public class SchedulerTest { public ShardRecordProcessor shardRecordProcessor(StreamIdentifier streamIdentifier) { return shardRecordProcessor(); } - } @RequiredArgsConstructor @@ -1115,9 +1688,11 @@ public class SchedulerTest { } @Override - public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, - StreamConfig streamConfig) { - if(shouldReturnDefaultShardSyncTaskmanager) { + public ShardSyncTaskManager createShardSyncTaskManager( + MetricsFactory metricsFactory, + StreamConfig streamConfig, + DeletedStreamListProvider deletedStreamListProvider) { + if (shouldReturnDefaultShardSyncTaskmanager) { return shardSyncTaskManager; } final ShardSyncTaskManager shardSyncTaskManager = mock(ShardSyncTaskManager.class); @@ -1129,7 +1704,7 @@ public class SchedulerTest { when(shardSyncTaskManager.hierarchicalShardSyncer()).thenReturn(hierarchicalShardSyncer); when(shardDetector.streamIdentifier()).thenReturn(streamConfig.streamIdentifier()); when(shardSyncTaskManager.callShardSyncTask()).thenReturn(new TaskResult(null)); - if(shardSyncFirstAttemptFailure) { + if (shardSyncFirstAttemptFailure) { when(shardDetector.listShards()) .thenThrow(new RuntimeException("Service Exception")) .thenReturn(Collections.EMPTY_LIST); @@ -1160,31 +1735,30 @@ public class SchedulerTest { private class TestKinesisCheckpointFactory implements CheckpointFactory { @Override - public Checkpointer createCheckpointer(final LeaseCoordinator 
leaseCoordinator, - final LeaseRefresher leaseRefresher) { + public Checkpointer createCheckpointer( + final LeaseCoordinator leaseCoordinator, final LeaseRefresher leaseRefresher) { return checkpoint; } } - // TODO: Upgrade to mockito >= 2.7.13, and use Spy on MultiStreamTracker to directly access the default methods without implementing TestMultiStreamTracker class + // TODO: Upgrade to mockito >= 2.7.13, and use Spy on MultiStreamTracker to directly access the default methods + // without implementing TestMultiStreamTracker class @NoArgsConstructor private class TestMultiStreamTracker implements MultiStreamTracker { @Override - public List streamConfigList(){ - return new ArrayList() {{ - add(new StreamConfig(StreamIdentifier.multiStreamInstance("acc1:stream1:1"), InitialPositionInStreamExtended.newInitialPosition( - InitialPositionInStream.LATEST))); - add(new StreamConfig(StreamIdentifier.multiStreamInstance("acc1:stream2:2"), InitialPositionInStreamExtended.newInitialPosition( - InitialPositionInStream.LATEST))); - add(new StreamConfig(StreamIdentifier.multiStreamInstance("acc2:stream1:1"), InitialPositionInStreamExtended.newInitialPosition( - InitialPositionInStream.LATEST))); - add(new StreamConfig(StreamIdentifier.multiStreamInstance("acc2:stream2:3"), InitialPositionInStreamExtended.newInitialPosition( - InitialPositionInStream.LATEST))); - }}; + public List streamConfigList() { + final InitialPositionInStreamExtended latest = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + + return Arrays.asList( + new StreamConfig(StreamIdentifier.multiStreamInstance("123456789012:stream1:1"), latest), + new StreamConfig(StreamIdentifier.multiStreamInstance("123456789012:stream2:2"), latest), + new StreamConfig(StreamIdentifier.multiStreamInstance("210987654321:stream1:1"), latest), + new StreamConfig(StreamIdentifier.multiStreamInstance("210987654321:stream2:3"), latest)); } @Override - public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy(){ + public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() { return new AutoDetectionAndDeferredDeletionStrategy() { @Override public Duration waitPeriodToDeleteFormerStreams() { @@ -1193,5 +1767,4 @@ public class SchedulerTest { }; } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java index 11d17368..a6ffcf0c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.coordinator; -import java.util.concurrent.ThreadPoolExecutor; - /** * Unit tests of Worker. */ @@ -118,10 +116,11 @@ public class WorkerTest { private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 = SAMPLE_RECORD_PROCESSOR_FACTORY; - - *//** + */ + /* * Test method for {@link Worker#getApplicationName()}. - *//* + */ + /* @Test public final void testGetStageName() { final String stageName = "testStageName"; @@ -346,10 +345,12 @@ public class WorkerTest { Assert.assertTrue(count > 0); } - *//** + */ + /* * Runs worker with threadPoolSize == numShards * Test method for {@link Worker#run()}. 
- *//* + */ + /* @Test public final void testRunWithThreadPoolSizeEqualToNumShards() throws Exception { final int numShards = 1; @@ -357,10 +358,12 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//** + */ + /* * Runs worker with threadPoolSize < numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testRunWithThreadPoolSizeLessThanNumShards() throws Exception { final int numShards = 3; @@ -368,10 +371,12 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//** + */ + /* * Runs worker with threadPoolSize > numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testRunWithThreadPoolSizeMoreThanNumShards() throws Exception { final int numShards = 3; @@ -379,10 +384,12 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//** + */ + /* * Runs worker with threadPoolSize < numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testOneSplitShard2Threads() throws Exception { final int threadPoolSize = 2; @@ -395,10 +402,12 @@ public class WorkerTest { runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard, config); } - *//** + */ + /* * Runs worker with threadPoolSize < numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testOneSplitShard2ThreadsWithCallsForEmptyRecords() throws Exception { final int threadPoolSize = 2; @@ -557,13 +566,15 @@ public class WorkerTest { verify(v2RecordProcessor, times(1)).shutdown(any(ShutdownInput.class)); } - *//** + */ + /* * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads. * This behavior makes the test a bit racy, since we need to ensure a specific order of events. * * @throws Exception - *//* + */ + /* @Test public final void testWorkerForcefulShutdown() throws Exception { final List shardList = createShardListWithOneShard(); @@ -1734,12 +1745,15 @@ public class WorkerTest { return new ReflectionFieldMatcher<>(itemClass, fieldName, fieldMatcher); } } - *//** + + */ + /* * Returns executor service that will be owned by the worker. This is useful to test the scenario * where worker shuts down the executor service also during shutdown flow. * * @return Executor service that will be owned by the worker. 
- *//* + */ + /* private WorkerThreadPoolExecutor getWorkerThreadPoolExecutor() { ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("ShardRecordProcessor-%04d").build(); return new WorkerThreadPoolExecutor(threadFactory); @@ -1756,9 +1770,6 @@ public class WorkerTest { return shards; } - *//** - * @return - *//* private List createShardListWithOneSplit() { List shards = new ArrayList(); SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("39428", "987324"); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java index 5e612ade..62272bbe 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java @@ -27,7 +27,7 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; /** * Mock LeaseRefresher by randomly throwing Leasing Exceptions. - * + * */ @RequiredArgsConstructor @Slf4j @@ -70,12 +70,13 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { } // Define which method should throw exception and when it should throw exception. - private ExceptionThrowingLeaseRefresherMethods methodThrowingException = ExceptionThrowingLeaseRefresherMethods.NONE; + private ExceptionThrowingLeaseRefresherMethods methodThrowingException = + ExceptionThrowingLeaseRefresherMethods.NONE; private int timeThrowingException = Integer.MAX_VALUE; /** * Set parameters used for throwing exception. - * + * * @param method which would throw exception * @param throwingTime defines what time to throw exception */ @@ -97,7 +98,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { // 1). method equals to methodThrowingException // 2). 
method calling count equals to what we want private void throwExceptions(String methodName, ExceptionThrowingLeaseRefresherMethods method) - throws DependencyException { + throws DependencyException { // Increase calling count for this method leaseRefresherMethodCallingCount[method.index()]++; if (method.equals(methodThrowingException) @@ -111,17 +112,16 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) throws ProvisionedThroughputException, DependencyException { - throwExceptions("createLeaseTableIfNotExists", - ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); + throwExceptions( + "createLeaseTableIfNotExists", ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); return leaseRefresher.createLeaseTableIfNotExists(readCapacity, writeCapacity); } @Override - public boolean createLeaseTableIfNotExists() - throws ProvisionedThroughputException, DependencyException { - throwExceptions("createLeaseTableIfNotExists", - ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); + public boolean createLeaseTableIfNotExists() throws ProvisionedThroughputException, DependencyException { + throwExceptions( + "createLeaseTableIfNotExists", ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); return leaseRefresher.createLeaseTableIfNotExists(); } @@ -141,15 +141,15 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { } @Override - public List listLeasesForStream(StreamIdentifier streamIdentifier) throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public List listLeasesForStream(StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("listLeasesForStream", ExceptionThrowingLeaseRefresherMethods.LISTLEASESFORSTREAM); return leaseRefresher.listLeasesForStream(streamIdentifier); } @Override - public List listLeases() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("listLeases", ExceptionThrowingLeaseRefresherMethods.LISTLEASES); return leaseRefresher.listLeases(); @@ -157,7 +157,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean createLeaseIfNotExists(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("createLeaseIfNotExists", ExceptionThrowingLeaseRefresherMethods.CREATELEASEIFNOTEXISTS); return leaseRefresher.createLeaseIfNotExists(lease); @@ -165,7 +165,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean renewLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("renewLease", ExceptionThrowingLeaseRefresherMethods.RENEWLEASE); return leaseRefresher.renewLease(lease); @@ -173,7 +173,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean takeLease(Lease lease, String owner) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws 
DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("takeLease", ExceptionThrowingLeaseRefresherMethods.TAKELEASE); return leaseRefresher.takeLease(lease, owner); @@ -181,7 +181,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean evictLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("evictLease", ExceptionThrowingLeaseRefresherMethods.EVICTLEASE); return leaseRefresher.evictLease(lease); @@ -189,7 +189,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public void deleteLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("deleteLease", ExceptionThrowingLeaseRefresherMethods.DELETELEASE); leaseRefresher.deleteLease(lease); @@ -197,7 +197,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean updateLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("updateLease", ExceptionThrowingLeaseRefresherMethods.UPDATELEASE); return leaseRefresher.updateLease(lease); @@ -205,7 +205,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public Lease getLease(String leaseKey) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("getLease", ExceptionThrowingLeaseRefresherMethods.GETLEASE); return leaseRefresher.getLease(leaseKey); @@ -219,8 +219,8 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { } @Override - public boolean isLeaseTableEmpty() throws DependencyException, - InvalidStateException, ProvisionedThroughputException { + public boolean isLeaseTableEmpty() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { return false; } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java index c390987c..e22a9126 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java @@ -18,17 +18,6 @@ package software.amazon.kinesis.leases; // TODO: Fix the lack of DynamoDB Loca // -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.math.BigInteger; import java.util.ArrayList; import java.util.Arrays; @@ -47,14 +36,15 @@ import java.util.stream.Stream; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.Validate; +import 
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java
index c390987c..e22a9126 100644
--- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java
+++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java
@@ -18,17 +18,6 @@ package software.amazon.kinesis.leases;
 // TODO: Fix the lack of DynamoDB Loca
 //
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
 import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -47,14 +36,15 @@ import java.util.stream.Stream;

 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.Validate;
+import org.hamcrest.Matchers;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
-
 import software.amazon.awssdk.services.kinesis.model.HashKeyRange;
+import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
 import software.amazon.awssdk.services.kinesis.model.SequenceNumberRange;
 import software.amazon.awssdk.services.kinesis.model.Shard;
 import software.amazon.awssdk.services.kinesis.model.ShardFilter;
@@ -63,31 +53,85 @@ import software.amazon.kinesis.common.HashKeyRangeForLease;
 import software.amazon.kinesis.common.InitialPositionInStream;
 import software.amazon.kinesis.common.InitialPositionInStreamExtended;
 import software.amazon.kinesis.common.StreamIdentifier;
+import software.amazon.kinesis.coordinator.DeletedStreamListProvider;
 import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException;
 import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher;
 import software.amazon.kinesis.leases.exceptions.DependencyException;
+import software.amazon.kinesis.leases.exceptions.InvalidStateException;
+import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
 import software.amazon.kinesis.metrics.MetricsScope;
 import software.amazon.kinesis.metrics.NullMetricsScope;
 import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 import static software.amazon.kinesis.leases.HierarchicalShardSyncer.MemoizationContext;
+import static software.amazon.kinesis.leases.HierarchicalShardSyncer.determineNewLeasesToCreate;

 @RunWith(MockitoJUnitRunner.class)
-// CHECKSTYLE:IGNORE JavaNCSS FOR NEXT 800 LINES
 public class HierarchicalShardSyncerTest {
-    private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = InitialPositionInStreamExtended
-            .newInitialPosition(InitialPositionInStream.LATEST);
-    private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = InitialPositionInStreamExtended
-            .newInitialPosition(InitialPositionInStream.TRIM_HORIZON);
-    private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = InitialPositionInStreamExtended
-            .newInitialPositionAtTimestamp(new Date(1000L));
+    private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST =
+            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST);
+    private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON =
+            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON);
+    private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP =
+            InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000L));
     private static final int EXPONENT = 128;
-    private static final String LEASE_OWNER = "TestOwnere";
+    private static final String LEASE_OWNER = "TestOwner";
     private static final MetricsScope SCOPE = new NullMetricsScope();
     private static final boolean MULTISTREAM_MODE_ON = true;
-    private static final String STREAM_IDENTIFIER = "acc:stream:1";
-    private static final HierarchicalShardSyncer.MultiStreamArgs MULTI_STREAM_ARGS = new HierarchicalShardSyncer.MultiStreamArgs(
-            MULTISTREAM_MODE_ON, StreamIdentifier.multiStreamInstance(STREAM_IDENTIFIER));
+    private static final String STREAM_IDENTIFIER = "123456789012:stream:1";
+    private static final HierarchicalShardSyncer.MultiStreamArgs MULTI_STREAM_ARGS =
+            new HierarchicalShardSyncer.MultiStreamArgs(
+                    MULTISTREAM_MODE_ON, StreamIdentifier.multiStreamInstance(STREAM_IDENTIFIER));
+
+    /**
+     * <pre>
+     * Shard structure (y-axis is
+     * epochs): 0 1 2 3 4   5- shards till epoch 102
    +     *          \ / \ / |   |
    +     *           6   7  4   5- shards from epoch 103 - 205
    +     *            \ /   |  /\
    +     *             8    4 9 10 -
    +     * shards from epoch 206 (open - no ending sequenceNumber)
+     * </pre>
+     */
+    private static final List<Shard> SHARD_GRAPH_A = Collections.unmodifiableList(constructShardListForGraphA());
+
+    /**
+     * Shard structure (x-axis is epochs):
+     * <pre>
    +     * 0  3   6   9
    +     * \ / \ / \ /
    +     *  2   5   8
    +     * / \ / \ / \
    +     * 1  4   7  10
+     * </pre>
+     */
+    private static final List<Shard> SHARD_GRAPH_B = Collections.unmodifiableList(constructShardListForGraphB());
+
+    /**
+     * <pre>
+     * Shard structure (y-axis is
+     * epochs):     0      1  2  3  - shards till epoch 102
    +     *            /   \    |  \ /
    +     *           4     5   1   6  - shards from epoch 103 - 205
    +     *          / \   / \  |   |
    +     *         7   8 9  10 1   6
    +     * shards from epoch 206 (open - no ending sequenceNumber)
+     * </pre>
+     */
+    private static final List<Shard> SHARD_GRAPH_C = Collections.unmodifiableList(constructShardListForGraphC());

     private final boolean ignoreUnexpectedChildShards = false;
@@ -96,10 +140,12 @@ public class HierarchicalShardSyncerTest {
     /**
      * Old/Obsolete max value of a sequence number (2^128 -1).
      */
-    public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);
+    public static final BigInteger MAX_SEQUENCE_NUMBER =
+            new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);

     @Mock
     private ShardDetector shardDetector;
+
     @Mock
     private DynamoDBLeaseRefresher dynamoDBLeaseRefresher;

@@ -121,22 +167,30 @@ public class HierarchicalShardSyncerTest {
     public void testDetermineNewLeasesToCreateNoShards() {
         final List<Shard> shards = Collections.emptyList();
         final List<Lease> leases = Collections.emptyList();
-        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
-        assertThat(HierarchicalShardSyncer.determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, leases,
-                INITIAL_POSITION_LATEST).isEmpty(), equalTo(true));
+        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
+                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
+        assertTrue(determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, leases, INITIAL_POSITION_LATEST)
+                .isEmpty());
     }

     /**
      * Test determineNewLeasesToCreate() where there are no shards for MultiStream
      */
-    @Test public void testDetermineNewLeasesToCreateNoShardsForMultiStream() {
+    @Test
+    public void testDetermineNewLeasesToCreateNoShardsForMultiStream() {
         final List<Shard> shards = Collections.emptyList();
         final List<Lease> leases = Collections.emptyList();
-        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
+        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
+                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();

-        assertThat(HierarchicalShardSyncer
-                .determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, leases, INITIAL_POSITION_LATEST,
-                        new HashSet<>(), MULTI_STREAM_ARGS).isEmpty(), equalTo(true));
+        assertTrue(determineNewLeasesToCreate(
+                        emptyLeaseTableSynchronizer,
+                        shards,
+                        leases,
+                        INITIAL_POSITION_LATEST,
+                        Collections.emptySet(),
+                        MULTI_STREAM_ARGS)
+                .isEmpty());
     }
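A pattern repeated throughout this file: Hamcrest boolean matchers are swapped for direct JUnit assertions. An illustrative before/after pair (not itself part of the diff):

    // before: matcher-based boolean check
    assertThat(newLeases.isEmpty(), equalTo(true));
    // after: equivalent assertion with one fewer static import and clearer intent
    assertTrue(newLeases.isEmpty());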
     /**
@@ -148,20 +202,16 @@ public class HierarchicalShardSyncerTest {
         final String shardId1 = "shardId-1";
         final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);

-        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
+        final List<Shard> shards = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
         final List<Lease> currentLeases = Collections.emptyList();
-        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
+        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
+                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();

-        final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(emptyLeaseTableSynchronizer,
-                shards, currentLeases, INITIAL_POSITION_LATEST);
-        validateHashRangeinLease(newLeases);
-
-        final Set<String> newLeaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<String> expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1));
-
-        assertThat(newLeases.size(), equalTo(expectedLeaseShardIds.size()));
-        assertThat(newLeaseKeys, equalTo(expectedLeaseShardIds));
+        final List<Lease> newLeases =
+                determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, currentLeases, INITIAL_POSITION_LATEST);
+        validateLeases(newLeases, shardId0, shardId1);
     }

     /**
@@ -173,20 +223,21 @@ public class HierarchicalShardSyncerTest {
         final String shardId1 = "shardId-1";
         final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);

-        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
+        final List<Shard> shards = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
         final List<Lease> currentLeases = Collections.emptyList();
-        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
+        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
+                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();

-        final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(emptyLeaseTableSynchronizer,
-                shards, currentLeases, INITIAL_POSITION_LATEST, new HashSet<>(), MULTI_STREAM_ARGS);
-        validateHashRangeinLease(newLeases);
-        final Set<String> newLeaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<String> expectedLeaseIds = new HashSet<>(
-                toMultiStreamLeaseList(Arrays.asList(shardId0, shardId1)));
-
-        assertThat(newLeases.size(), equalTo(expectedLeaseIds.size()));
-        assertThat(newLeaseKeys, equalTo(expectedLeaseIds));
+        final List<Lease> newLeases = determineNewLeasesToCreate(
+                emptyLeaseTableSynchronizer,
+                shards,
+                currentLeases,
+                INITIAL_POSITION_LATEST,
+                new HashSet<>(),
+                MULTI_STREAM_ARGS);
+        validateLeases(newLeases, toMultiStreamLeases(shardId0, shardId1));
     }
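The STREAM_IDENTIFIER constant changed from "acc:stream:1" to "123456789012:stream:1" because a multi-stream StreamIdentifier serializes as accountId:streamName:creationEpoch, with a plausible 12-digit account id. A worked example of the resulting key material (lease-key layout inferred from the constants and helpers in this file):

    StreamIdentifier id = StreamIdentifier.multiStreamInstance("123456789012:stream:1");
    // multi-stream lease keys prepend the serialized stream identifier:
    String leaseKey = "123456789012:stream:1" + ":" + "shardId-0";
    // -> "123456789012:stream:1:shardId-0"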
     /**
@@ -201,27 +252,30 @@ public class HierarchicalShardSyncerTest {
         final String shardId3 = "shardId-3";
         final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);

-        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
-        final List<Shard> shardsWithoutLeases = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
+        final List<Shard> shardsWithLeases =
+                Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
+        final List<Shard> shardsWithoutLeases = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId1, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId2, shardId1, null, sequenceRange));
-        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases).flatMap(x -> x.stream()).collect(Collectors.toList());
-        final List<Lease> currentLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
+        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases)
+                .flatMap(x -> x.stream())
+                .collect(Collectors.toList());
+        final List<Lease> currentLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
         final Set<String> inconsistentShardIds = new HashSet<>(Collections.singletonList(shardId2));

         Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
-        Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+        Map<String, Set<String>> shardIdToChildShardIdsMap =
+                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
         final HierarchicalShardSyncer.LeaseSynchronizer leaseSynchronizer =
-                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
+                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
+                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);

-        final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases,
-                INITIAL_POSITION_LATEST, inconsistentShardIds);
-        validateHashRangeinLease(newLeases);
-        final Set<String> newLeaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<String> expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1));
-        assertThat(newLeases.size(), equalTo(expectedLeaseShardIds.size()));
-        assertThat(newLeaseKeys, equalTo(expectedLeaseShardIds));
+        final List<Lease> newLeases = determineNewLeasesToCreate(
+                leaseSynchronizer, shards, currentLeases, INITIAL_POSITION_LATEST, inconsistentShardIds);
+        validateLeases(newLeases, shardId0, shardId1);
     }

     /**
@@ -236,37 +290,57 @@ public class HierarchicalShardSyncerTest {
         final String shardId3 = "shardId-3";
         final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);

-        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
-        final List<Shard> shardsWithoutLeases = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
+        final List<Shard> shardsWithLeases =
+                Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
+        final List<Shard> shardsWithoutLeases = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId1, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId2, shardId1, null, sequenceRange));
-        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases).flatMap(x -> x.stream()).collect(Collectors.toList());
-        final List<Lease> currentLeases = new ArrayList(createMultiStreamLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo"));
+        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases)
+                .flatMap(x -> x.stream())
+                .collect(Collectors.toList());
+        final List<Lease> currentLeases = new ArrayList<>(
+                createMultiStreamLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo"));
         final Set<String> inconsistentShardIds = new HashSet<>(Collections.singletonList(shardId2));

         Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
-        Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+        Map<String, Set<String>> shardIdToChildShardIdsMap =
+                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
         final HierarchicalShardSyncer.LeaseSynchronizer leaseSynchronizer =
-                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
+                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
+                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);

-        final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases,
-                INITIAL_POSITION_LATEST, inconsistentShardIds, MULTI_STREAM_ARGS);
-        final Set<String> newLeaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        validateHashRangeinLease(newLeases);
-        final Set<String> expectedLeaseShardIds = new HashSet<>(
-                toMultiStreamLeaseList(Arrays.asList(shardId0, shardId1)));
-        assertThat(newLeases.size(), equalTo(expectedLeaseShardIds.size()));
-        assertThat(newLeaseKeys, equalTo(expectedLeaseShardIds));
+        final List<Lease> newLeases = determineNewLeasesToCreate(
+                leaseSynchronizer,
+                shards,
+                currentLeases,
+                INITIAL_POSITION_LATEST,
+                inconsistentShardIds,
+                MULTI_STREAM_ARGS);
+        validateLeases(newLeases, toMultiStreamLeases(shardId0, shardId1));
     }

-    private void validateHashRangeinLease(List<Lease> leases) {
+    private static void validateHashRangeInLease(List<Lease> leases) {
         final Consumer<Lease> leaseValidation = lease -> {
             Validate.notNull(lease.hashKeyRangeForLease());
-            Validate.isTrue(lease.hashKeyRangeForLease().startingHashKey()
-                    .compareTo(lease.hashKeyRangeForLease().endingHashKey()) < 0);
+            Validate.isTrue(lease.hashKeyRangeForLease()
+                            .startingHashKey()
+                            .compareTo(lease.hashKeyRangeForLease().endingHashKey())
+                    < 0);
         };
-        leases.forEach(lease -> leaseValidation.accept(lease));
+        leases.forEach(leaseValidation);
+    }
+
+    /**
+     * Validates that a {@link Lease} exists for each expected lease key.
+     */
+    private static void validateLeases(final List<Lease> leases, final String... expectedLeaseKeys) {
+        validateHashRangeInLease(leases);
+        assertEquals(expectedLeaseKeys.length, leases.size());
+
+        final Set<String> leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
+        assertThat(leaseKeys, Matchers.containsInAnyOrder(expectedLeaseKeys));
     }

     /**
@@ -285,194 +359,154 @@ public class HierarchicalShardSyncerTest {
         testCheckAndCreateLeasesForShardsIfMissing(INITIAL_POSITION_LATEST);
     }

+    private void testLeaseCreation(
+            final List<Shard> shards, final boolean ignoreUnexpectedChildShards, final String... expectedLeaseKeys)
+            throws Exception {
+        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
+
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);
+        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
+        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
+                .thenReturn(true);
+
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_LATEST,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());
+
+        final List<Lease> requestLeases = leaseCaptor.getAllValues();
+        final Set<ExtendedSequenceNumber> extendedSequenceNumbers =
+                requestLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
+
+        validateLeases(requestLeases, expectedLeaseKeys);
+        assertEquals(1, extendedSequenceNumbers.size());
+
+        extendedSequenceNumbers.forEach(seq -> assertEquals(ExtendedSequenceNumber.LATEST, seq));
+
+        verify(shardDetector, never()).listShards();
+        verify(shardDetector).listShardsWithoutConsumingResourceNotFoundException();
+        verify(dynamoDBLeaseRefresher, times(requestLeases.size())).createLeaseIfNotExists(any(Lease.class));
+        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
+    }
+
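The new testLeaseCreation helper above leans on Mockito's ArgumentCaptor to record every Lease handed to createLeaseIfNotExists. A stripped-down sketch of the mechanism (refresher and runSync are placeholders, not names from this diff):

    ArgumentCaptor<Lease> captor = ArgumentCaptor.forClass(Lease.class);
    when(refresher.createLeaseIfNotExists(captor.capture())).thenReturn(true);
    runSync();                                    // exercises the mocked refresher
    List<Lease> created = captor.getAllValues();  // one element per attempted create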
     /**
      * Test checkAndCreateLeaseForNewShards while not providing a pre-fetched list of shards
      */
     @Test
     public void testCheckAndCreateLeasesForShardsIfMissingAtLatest() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-
-        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-
-        when(shardDetector.listShards()).thenReturn(shards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
-
-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, false, dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final Set<String> expectedShardIds = new HashSet<>(
-                Arrays.asList("shardId-4", "shardId-8", "shardId-9", "shardId-10"));
-
-        final List<Lease> requestLeases = leaseCaptor.getAllValues();
-        final Set<String> requestLeaseKeys = requestLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-
-        validateHashRangeinLease(requestLeases);
-        assertThat(requestLeases.size(), equalTo(expectedShardIds.size()));
-        assertThat(requestLeaseKeys, equalTo(expectedShardIds));
-        assertThat(extendedSequenceNumbers.size(), equalTo(1));
-
-        extendedSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST)));
-
-        verify(shardDetector).listShards();
-        verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class));
-        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
-
+        testLeaseCreation(SHARD_GRAPH_A, false, "shardId-4", "shardId-8", "shardId-9", "shardId-10");
     }

     @Test
     public void testCheckAndCreateLeasesForShardsIfMissingAtLatestMultiStream() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-
-        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-
-        when(shardDetector.listShards()).thenReturn(shards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
         setupMultiStream();
-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, false, dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final Set<String> expectedShardIds = new HashSet<>(
-                toMultiStreamLeaseList(Arrays.asList("shardId-4", "shardId-8", "shardId-9", "shardId-10")));
-
-        final List<Lease> requestLeases = leaseCaptor.getAllValues();
-        final Set<String> requestLeaseKeys = requestLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-
-        validateHashRangeinLease(requestLeases);
-        assertThat(requestLeases.size(), equalTo(expectedShardIds.size()));
-        assertThat(requestLeaseKeys, equalTo(expectedShardIds));
-        assertThat(extendedSequenceNumbers.size(), equalTo(1));
-
-        extendedSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST)));
-
-        verify(shardDetector).listShards();
-        verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class));
-        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
-
+        testLeaseCreation(
+                SHARD_GRAPH_A, false, toMultiStreamLeases("shardId-4", "shardId-8", "shardId-9", "shardId-10"));
     }

-    private List<String> toMultiStreamLeaseList(List<String> shardIdBasedLeases) {
-        return shardIdBasedLeases.stream().map(s -> STREAM_IDENTIFIER + ":" + s)
-                .collect(Collectors.toList());
+    /**
+     * Converts one-or-more shard ids to their multi-stream equivalent.
+     *
+     * @param shardIds vararg of shard ids (i.e., {@code shardId-<N>})
+     * @return a same-sized array where the Nth element is the multi-stream
+     *         equivalent of the Nth {@code shardIds} input
+     */
+    private static String[] toMultiStreamLeases(final String... shardIds) {
+        final String[] multiStreamLeaseKey = new String[shardIds.length];
+        for (int i = 0; i < shardIds.length; i++) {
+            multiStreamLeaseKey[i] = STREAM_IDENTIFIER + ":" + shardIds[i];
+        }
+        return multiStreamLeaseKey;
+    }
+
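Usage of the new vararg helper, shown with the constants above (illustrative only):

    String[] keys = toMultiStreamLeases("shardId-4", "shardId-8");
    // keys[0] -> "123456789012:stream:1:shardId-4"
    // keys[1] -> "123456789012:stream:1:shardId-8"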
+    private void testCheckAndCreateLeasesForShardsWithShardList(final String... expectedLeaseKeys) throws Exception {
+        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
+        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
+                .thenReturn(true);
+
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_LATEST,
+                SHARD_GRAPH_A,
+                false,
+                SCOPE,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());
+
+        final List<Lease> requestLeases = leaseCaptor.getAllValues();
+        final Set<ExtendedSequenceNumber> extendedSequenceNumbers =
+                requestLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
+
+        validateLeases(requestLeases, expectedLeaseKeys);
+        assertEquals(1, extendedSequenceNumbers.size());
+
+        extendedSequenceNumbers.forEach(seq -> assertEquals(ExtendedSequenceNumber.LATEST, seq));
+
+        verify(shardDetector, never()).listShards();
+        verify(shardDetector, never()).listShardsWithoutConsumingResourceNotFoundException();
+        verify(dynamoDBLeaseRefresher, times(requestLeases.size())).createLeaseIfNotExists(any(Lease.class));
+        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
+    }

     /**
      * Test checkAndCreateLeaseForNewShards with a pre-fetched list of shards. In this scenario, shardDetector.listShards()
-     * should never be called.
+     * or shardDetector.listShardsWithoutConsumingResourceNotFoundException() should never be called.
      */
     @Test
     public void testCheckAndCreateLeasesForShardsWithShardList() throws Exception {
-        final List<Shard> latestShards = constructShardListForGraphA();
-
-        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-        when(shardDetector.listShards()).thenReturn(latestShards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
-
-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        latestShards, false, SCOPE,
-                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final Set<String> expectedShardIds = new HashSet<>(
-                Arrays.asList("shardId-4", "shardId-8", "shardId-9", "shardId-10"));
-
-        final List<Lease> requestLeases = leaseCaptor.getAllValues();
-        final Set<String> requestLeaseKeys = requestLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-
-        assertThat(requestLeases.size(), equalTo(expectedShardIds.size()));
-        assertThat(requestLeaseKeys, equalTo(expectedShardIds));
-        assertThat(extendedSequenceNumbers.size(), equalTo(1));
-
-        validateHashRangeinLease(requestLeases);
-
-        extendedSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST)));
-
-        verify(shardDetector, never()).listShards();
-        verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class));
-        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
+        testCheckAndCreateLeasesForShardsWithShardList("shardId-4", "shardId-8", "shardId-9", "shardId-10");
     }

     /**
      * Test checkAndCreateLeaseForNewShards with a pre-fetched list of shards. In this scenario, shardDetector.listShards()
-     * should never be called.
+     * or shardDetector.listShardsWithoutConsumingResourceNotFoundException() should never be called.
      */
     @Test
     public void testCheckAndCreateLeasesForShardsWithShardListMultiStream() throws Exception {
-        final List<Shard> latestShards = constructShardListForGraphA();
-
-        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-        when(shardDetector.listShards()).thenReturn(latestShards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
         setupMultiStream();
-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        latestShards, false, SCOPE,
-                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final Set<String> expectedShardIds = new HashSet<>(
-                toMultiStreamLeaseList(Arrays.asList("shardId-4", "shardId-8", "shardId-9", "shardId-10")));
-
-        final List<Lease> requestLeases = leaseCaptor.getAllValues();
-        final Set<String> requestLeaseKeys = requestLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-
-        assertThat(requestLeases.size(), equalTo(expectedShardIds.size()));
-        assertThat(requestLeaseKeys, equalTo(expectedShardIds));
-        assertThat(extendedSequenceNumbers.size(), equalTo(1));
-        validateHashRangeinLease(requestLeases);
-        extendedSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST)));
-
-        verify(shardDetector, never()).listShards();
-        verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class));
-        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
+        testCheckAndCreateLeasesForShardsWithShardList(
+                toMultiStreamLeases("shardId-4", "shardId-8", "shardId-9", "shardId-10"));
     }

     /**
      * Test checkAndCreateLeaseForNewShards with an empty list of shards. In this scenario, shardDetector.listShards()
-     * should never be called.
+     * or shardDetector.listShardsWithoutConsumingResourceNotFoundException() should never be called.
      */
     @Test
     public void testCheckAndCreateLeasesForShardsWithEmptyShardList() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-
         final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-        when(shardDetector.listShards()).thenReturn(shards);
+        when(shardDetector.listShards()).thenReturn(SHARD_GRAPH_A);
         when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
+        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
+                .thenReturn(true);

-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        new ArrayList<Shard>(), false, SCOPE,
-                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final Set<String> expectedShardIds = new HashSet<>();
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_LATEST,
+                new ArrayList<>(),
+                false,
+                SCOPE,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

         final List<Lease> requestLeases = leaseCaptor.getAllValues();
-        final Set<String> requestLeaseKeys = requestLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-        validateHashRangeinLease(requestLeases);
-        assertThat(requestLeases.size(), equalTo(expectedShardIds.size()));
-        assertThat(extendedSequenceNumbers.size(), equalTo(0));
+        final Set<ExtendedSequenceNumber> extendedSequenceNumbers =
+                requestLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
+        validateLeases(requestLeases);
+        assertEquals(0, extendedSequenceNumbers.size());

         verify(shardDetector, never()).listShards();
+        verify(shardDetector, never()).listShardsWithoutConsumingResourceNotFoundException();
         verify(dynamoDBLeaseRefresher, never()).createLeaseIfNotExists(any(Lease.class));
         verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -481,16 +515,18 @@ public class HierarchicalShardSyncerTest {
          *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
          * Initial position: TRIM_HORIZON
          * Leases to create: (0, 1, 2, 3, 4, 5)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonWithEmptyLeaseTable() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-0", "shardId-1", "shardId-2",
-                "shardId-3", "shardId-4", "shardId-5"));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate);
+        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(
+                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5"));
+        testCheckAndCreateLeaseForShardsIfMissing(
+                SHARD_GRAPH_A, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate);
     }
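Taken together, the empty-lease-table tests in this region pin down how the initial position selects leases for graph A. Summarized for reference (keys copied from the tests' own expectations):

    // TRIM_HORIZON       -> shardId-0 .. shardId-5  (the oldest ancestors)
    // AT_TIMESTAMP(1000) -> shardId-8, shardId-4, shardId-9, shardId-10
    // AT_TIMESTAMP(200)  -> shardId-6, shardId-7, shardId-4, shardId-5
    // LATEST             -> shardId-8, shardId-4, shardId-9, shardId-10 (open shards only)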
-    /*
+    /**
+     * <pre>
      * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -499,16 +535,18 @@ public class HierarchicalShardSyncerTest {
          *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
          * Initial position: AT_TIMESTAMP(1000)
          * Leases to create: (8, 4, 9, 10)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtTimestampWithEmptyLeaseTable1() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9",
-                "shardId-10"));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate);
+        final Set<String> expectedLeaseKeysToCreate =
+                new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9", "shardId-10"));
+        testCheckAndCreateLeaseForShardsIfMissing(
+                SHARD_GRAPH_A, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -517,18 +555,19 @@ public class HierarchicalShardSyncerTest {
          *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
          * Initial position: AT_TIMESTAMP(200)
          * Leases to create: (6, 7, 4, 5)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtTimestampWithEmptyLeaseTable2() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-6", "shardId-7", "shardId-4",
-                "shardId-5"));
-        final InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended
-                .newInitialPositionAtTimestamp(new Date(200L));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeysToCreate);
+        final Set<String> expectedLeaseKeysToCreate =
+                new HashSet<>(Arrays.asList("shardId-6", "shardId-7", "shardId-4", "shardId-5"));
+        final InitialPositionInStreamExtended initialPosition =
+                InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(200L));
+        testCheckAndCreateLeaseForShardsIfMissing(SHARD_GRAPH_A, initialPosition, expectedLeaseKeysToCreate);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -537,16 +576,17 @@ public class HierarchicalShardSyncerTest {
          *    8    4 9  10 - shards from epoch 206 (open - no ending sequenceNumber)
          * Initial position: LATEST
          * Leases to create: (8, 4, 9, 10)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtLatestWithEmptyLeaseTable() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9",
-                "shardId-10"));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate);
+        final Set<String> expectedLeaseKeysToCreate =
+                new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9", "shardId-10"));
+        testCheckAndCreateLeaseForShardsIfMissing(SHARD_GRAPH_A, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -556,22 +596,27 @@ public class HierarchicalShardSyncerTest {
          * Missing leases: (0, 6, 8)
          * Initial position: TRIM_HORIZON
          * Leases to create: (0)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonWithPartialLeaseTable() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
+        final List<Shard> shards = SHARD_GRAPH_A;

         // Leases for shard-0 and its descendants (shard-6, and shard-8) are missing. Expect lease sync to recover the
         // lease for shard-0 when reading from TRIM_HORIZON.
         final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
         final List<Shard> shardsWithLeases = shards.stream()
-                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
-        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.TRIM_HORIZON, LEASE_OWNER);
+                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
+                .collect(Collectors.toList());
+        final List<Lease> existingLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.TRIM_HORIZON, LEASE_OWNER);

-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-0"));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate, existingLeases);
+        final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
+        testCheckAndCreateLeaseForShardsIfMissing(
+                shards, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate, existingLeases);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -581,22 +626,27 @@ public class HierarchicalShardSyncerTest {
          * Missing leases: (0, 6, 8)
          * Initial position: AT_TIMESTAMP(1000)
          * Leases to create: (0)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtTimestampWithPartialLeaseTable1() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
+        final List<Shard> shards = SHARD_GRAPH_A;

         // Leases for shard-0 and its descendants (shard-6, and shard-8) are missing. Expect lease sync to recover the
         // lease for shard-0 when reading from AT_TIMESTAMP.
         final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
         final List<Shard> shardsWithLeases = shards.stream()
-                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
-        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);
+                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
+                .collect(Collectors.toList());
+        final List<Lease> existingLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);

-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-0"));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate, existingLeases);
+        final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
+        testCheckAndCreateLeaseForShardsIfMissing(
+                shards, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate, existingLeases);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -606,24 +656,28 @@ public class HierarchicalShardSyncerTest {
          * Missing leases: (0, 6, 8)
          * Initial position: AT_TIMESTAMP(200)
          * Leases to create: (0)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtTimestampWithPartialLeaseTable2() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
-        final InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended
-                .newInitialPositionAtTimestamp(new Date(200L));
+        final List<Shard> shards = SHARD_GRAPH_A;
+        final InitialPositionInStreamExtended initialPosition =
+                InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(200L));

         // Leases for shard-0 and its descendants (shard-6, and shard-8) are missing. Expect lease sync to recover the
         // lease for shard-0 when reading from AT_TIMESTAMP.
         final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
         final List<Shard> shardsWithLeases = shards.stream()
-                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
-        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);
+                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
+                .collect(Collectors.toList());
+        final List<Lease> existingLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);

-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-0"));
+        final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
         testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeysToCreate, existingLeases);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (each level depicts a stream segment):
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
    @@ -633,215 +687,195 @@ public class HierarchicalShardSyncerTest {
          * Missing leases: (0, 6, 8)
          * Initial position: LATEST
          * Leases to create: (0)
+     * </pre>
     */
    @Test
    public void testCheckAndCreateLeasesForNewShardsAtLatestWithPartialLeaseTable() throws Exception {
-        final List<Shard> shards = constructShardListForGraphA();
+        final List<Shard> shards = SHARD_GRAPH_A;

         // Leases for shard-0 and its descendants (shard-6, and shard-8) are missing. Expect lease sync to recover the
         // lease for shard-0 when reading from LATEST.
         final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
         final List<Shard> shardsWithLeases = shards.stream()
-                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
-        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, LEASE_OWNER);
+                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
+                .collect(Collectors.toList());
+        final List<Lease> existingLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, LEASE_OWNER);

-        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-0"));
-        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate, existingLeases);
+        final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
+        testCheckAndCreateLeaseForShardsIfMissing(
+                shards, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate, existingLeases);
     }

     @Test(expected = KinesisClientLibIOException.class)
     public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpen() throws Exception {
-        final List<Shard> shards = new ArrayList<>(constructShardListForGraphA());
-        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder().endingSequenceNumber(null)
+        final List<Shard> shards = new ArrayList<>(SHARD_GRAPH_A);
+        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder()
+                .endingSequenceNumber(null)
                 .build();
         final Shard shard = shards.get(3).toBuilder().sequenceNumberRange(range).build();
         shards.remove(3);
         shards.add(3, shard);

-        when(shardDetector.listShards()).thenReturn(shards);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);

         try {
-            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher,
-                    INITIAL_POSITION_TRIM_HORIZON, SCOPE, false, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    INITIAL_POSITION_TRIM_HORIZON,
+                    SCOPE,
+                    false,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
         } finally {
-            verify(shardDetector).listShards();
+            verify(shardDetector).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, never()).listLeases();
         }
     }

     @Test(expected = KinesisClientLibIOException.class)
     public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenForMultiStream() throws Exception {
-        final List<Shard> shards = new ArrayList<>(constructShardListForGraphA());
-        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder().endingSequenceNumber(null)
+        final List<Shard> shards = new ArrayList<>(SHARD_GRAPH_A);
+        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder()
+                .endingSequenceNumber(null)
                 .build();
         final Shard shard = shards.get(3).toBuilder().sequenceNumberRange(range).build();
         shards.remove(3);
         shards.add(3, shard);

-        when(shardDetector.listShards()).thenReturn(shards);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);
         setupMultiStream();
         try {
-            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher,
-                    INITIAL_POSITION_TRIM_HORIZON, SCOPE, false,
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    INITIAL_POSITION_TRIM_HORIZON,
+                    SCOPE,
+                    false,
                     dynamoDBLeaseRefresher.isLeaseTableEmpty());
         } finally {
-            verify(shardDetector).listShards();
+            verify(shardDetector).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, never()).listLeases();
         }
     }
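Both ParentIsOpen tests above build the failure case the same way: a parent shard is given a range with no ending sequence number, which makes its children unexplainable during a sync. The shape of that setup, isolated as a sketch reusing the builder calls visible above (parent is a placeholder):

    SequenceNumberRange open =
            parent.sequenceNumberRange().toBuilder().endingSequenceNumber(null).build();
    Shard reopenedParent = parent.toBuilder().sequenceNumberRange(open).build();
    // a sync over a shard graph containing reopenedParent plus its children
    // is expected to fail with KinesisClientLibIOException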

+    private void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildren(
+            final String... expectedLeaseKeys) throws Exception {
+        final List<Shard> shards = new ArrayList<>(SHARD_GRAPH_A);
+        final Shard shard = shards.get(5);
+        assertEquals("shardId-5", shard.shardId());
+
+        shards.remove(5);
+
+        // shardId-5 in graph A has two children (shardId-9 and shardId-10). if shardId-5
+        // is not closed, those children should be ignored when syncing shards, no leases
+        // should be obtained for them, and we should obtain a lease on the still-open
+        // parent.
+        shards.add(
+                5,
+                shard.toBuilder()
+                        .sequenceNumberRange(shard.sequenceNumberRange().toBuilder()
+                                .endingSequenceNumber(null)
+                                .build())
+                        .build());
+
+        testLeaseCreation(shards, true, expectedLeaseKeys);
+    }
+
     /**
      * Test checkAndCreateLeasesForNewShards() when a parent is open and children of open parents are being ignored.
      */
     @Test
     public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildren()
             throws Exception {
-        final List<Shard> shards = new ArrayList<>(constructShardListForGraphA());
-        final Shard shard = shards.get(5);
-        assertThat(shard.shardId(), equalTo("shardId-5"));
-
-        shards.remove(5);
-
-        // shardId-5 in graph A has two children (shardId-9 and shardId-10). if shardId-5
-        // is not closed, those children should be ignored when syncing shards, no leases
-        // should be obtained for them, and we should obtain a lease on the still-open
-        // parent.
-        shards.add(5,
-                shard.toBuilder()
-                        .sequenceNumberRange(shard.sequenceNumberRange().toBuilder().endingSequenceNumber(null).build())
-                        .build());
-
-        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-
-        when(shardDetector.listShards()).thenReturn(shards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
-
-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, true, dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final List<Lease> leases = leaseCaptor.getAllValues();
-        final Set<String> leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> leaseSequenceNumbers = leases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-
-        final Set<String> expectedShardIds = new HashSet<>(Arrays.asList("shardId-4", "shardId-5", "shardId-8"));
-
-        assertThat(leaseKeys.size(), equalTo(expectedShardIds.size()));
-        assertThat(leaseKeys, equalTo(expectedShardIds));
-        assertThat(leaseSequenceNumbers.size(), equalTo(1));
-
-        leaseSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST)));
-
-        verify(shardDetector).listShards();
-        verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class));
-        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
+        testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildren(
+                "shardId-4", "shardId-5", "shardId-8");
     }

     @Test
-    public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildrenMultiStream() throws Exception {
-        final List<Shard> shards = new ArrayList<>(constructShardListForGraphA());
-        final Shard shard = shards.get(5);
-        assertThat(shard.shardId(), equalTo("shardId-5"));
-
-        shards.remove(5);
-
-        // shardId-5 in graph A has two children (shardId-9 and shardId-10). if shardId-5
-        // is not closed, those children should be ignored when syncing shards, no leases
-        // should be obtained for them, and we should obtain a lease on the still-open
-        // parent.
-        shards.add(5,
-                shard.toBuilder()
-                        .sequenceNumberRange(shard.sequenceNumberRange().toBuilder().endingSequenceNumber(null).build())
-                        .build());
-
-        final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-
-        when(shardDetector.listShards()).thenReturn(shards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
+    public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildrenMultiStream()
+            throws Exception {
         setupMultiStream();
-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, true, dynamoDBLeaseRefresher.isLeaseTableEmpty());
-
-        final List<Lease> leases = leaseCaptor.getAllValues();
-        final Set<String> leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> leaseSequenceNumbers = leases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-
-        final Set<String> expectedShardIds = new HashSet<>(toMultiStreamLeaseList(Arrays.asList("shardId-4", "shardId-5", "shardId-8")));
-
-        assertThat(leaseKeys.size(), equalTo(expectedShardIds.size()));
-        assertThat(leaseKeys, equalTo(expectedShardIds));
-        assertThat(leaseSequenceNumbers.size(), equalTo(1));
-
-        leaseSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST)));
-
-        verify(shardDetector).listShards();
-        verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class));
-        verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
+        testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildren(
+                toMultiStreamLeases("shardId-4", "shardId-5", "shardId-8"));
     }

     @Test
     public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShard() throws Exception {
-        testCheckAndCreateLeasesForNewShardsAndClosedShard(ExtendedSequenceNumber.TRIM_HORIZON,
-                INITIAL_POSITION_TRIM_HORIZON);
+        testCheckAndCreateLeasesForNewShardsAndClosedShard(
+                ExtendedSequenceNumber.TRIM_HORIZON, INITIAL_POSITION_TRIM_HORIZON);
     }

     @Test
     public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShard() throws Exception {
-        testCheckAndCreateLeasesForNewShardsAndClosedShard(ExtendedSequenceNumber.AT_TIMESTAMP,
-                INITIAL_POSITION_AT_TIMESTAMP);
+        testCheckAndCreateLeasesForNewShardsAndClosedShard(
+                ExtendedSequenceNumber.AT_TIMESTAMP, INITIAL_POSITION_AT_TIMESTAMP);
     }
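The ClosedShard helpers below steer the sync by stamping sentinel checkpoints onto leases. A minimal sketch of the states they use (lease is a placeholder):

    lease.checkpoint(ExtendedSequenceNumber.SHARD_END);   // shard fully consumed
    lease.checkpoint(new ExtendedSequenceNumber("1"));    // mid-shard position
    boolean done = ExtendedSequenceNumber.SHARD_END.equals(lease.checkpoint());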

-    private void testCheckAndCreateLeasesForNewShardsAndClosedShard(final ExtendedSequenceNumber sequenceNumber,
-            final InitialPositionInStreamExtended position) throws Exception {
+    private void testCheckAndCreateLeasesForNewShardsAndClosedShard(
+            final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position)
+            throws Exception {
         final String shardIdPrefix = "shardId-%d";
-        final List<Shard> shards = constructShardListForGraphA();
+        final List<Shard> shards = SHARD_GRAPH_A;
         final List<Lease> leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER);

         // Marking shardId-0 as ShardEnd.
-        leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst()
+        leases.stream()
+                .filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey()))
+                .findFirst()
                 .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END));

         // Marking child of shardId-0 to be processed and not at TRIM_HORIZON.
-        leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst()
+        leases.stream()
+                .filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey()))
+                .findFirst()
                 .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1")));

         final ArgumentCaptor<Lease> leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class);
         final ArgumentCaptor<Lease> leaseDeleteCaptor = ArgumentCaptor.forClass(Lease.class);

-        when(shardDetector.listShards()).thenReturn(shards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()).thenReturn(leases);
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);
+        when(dynamoDBLeaseRefresher.listLeases())
+                .thenReturn(Collections.emptyList())
+                .thenReturn(leases);
+        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture()))
+                .thenReturn(true);
         doNothing().when(dynamoDBLeaseRefresher).deleteLease(leaseDeleteCaptor.capture());

         // Initial call: No leases present, create leases.
-        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                position,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

         final Set<Lease> createLeases = new HashSet<>(leaseCreateCaptor.getAllValues());
         final Set<Lease> expectedCreateLeases = getExpectedLeasesForGraphA(shards, sequenceNumber, position);
-        assertThat(createLeases, equalTo(expectedCreateLeases));
+        assertEquals(expectedCreateLeases, createLeases);

-        verify(shardDetector, times(1)).listShards();
+        verify(shardDetector, times(1)).listShardsWithoutConsumingResourceNotFoundException();
         verify(dynamoDBLeaseRefresher, times(1)).listLeases();
         verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class));
         verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));

         // Second call: Leases present, no leases should be deleted.
-        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                position,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

         final List<Lease> deleteLeases = leaseDeleteCaptor.getAllValues();
-        final Set<String> shardIds = deleteLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> sequenceNumbers = deleteLeases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
-        assertThat(deleteLeases.size(), equalTo(0));
+        assertTrue(deleteLeases.isEmpty());

-        verify(shardDetector, times(2)).listShards();
+        verify(shardDetector, times(2)).listShardsWithoutConsumingResourceNotFoundException();
         verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class));
         verify(dynamoDBLeaseRefresher, times(2)).listLeases();
     }

@@ -849,152 +883,212 @@ public class HierarchicalShardSyncerTest {
     @Test(expected = DependencyException.class)
     public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithListLeasesExceptions()
             throws Exception {
-        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(ExtendedSequenceNumber.TRIM_HORIZON,
-                INITIAL_POSITION_TRIM_HORIZON);
+        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(
+                ExtendedSequenceNumber.TRIM_HORIZON, INITIAL_POSITION_TRIM_HORIZON);
     }

     @Test(expected = DependencyException.class)
     public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithListLeasesExceptions()
             throws Exception {
-        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(ExtendedSequenceNumber.AT_TIMESTAMP,
-                INITIAL_POSITION_AT_TIMESTAMP);
+        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(
+                ExtendedSequenceNumber.AT_TIMESTAMP, INITIAL_POSITION_AT_TIMESTAMP);
     }

     private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(
             final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position)
             throws Exception {
         final String shardIdPrefix = "shardId-%d";
-        final List<Shard> shards = constructShardListForGraphA();
+        final List<Shard> shards = SHARD_GRAPH_A;
         final List<Lease> leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER);

         // Marking shardId-0 as ShardEnd.
-        leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst()
+        leases.stream()
+                .filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey()))
+                .findFirst()
                 .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END));

         // Marking child of shardId-0 to be processed and not at TRIM_HORIZON.
-        leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst()
+        leases.stream()
+                .filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey()))
+                .findFirst()
                 .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1")));

         final ArgumentCaptor<Lease> leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class);

-        when(shardDetector.listShards()).thenReturn(shards);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);
         when(dynamoDBLeaseRefresher.listLeases())
                 .thenThrow(new DependencyException(new Throwable("Throw for ListLeases")))
-                .thenReturn(Collections.emptyList()).thenReturn(leases);
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true);
+                .thenReturn(Collections.emptyList())
+                .thenReturn(leases);
+        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture()))
+                .thenReturn(true);

         try {
             // Initial call: Call to create leases. Fails on ListLeases
-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    position,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
         } finally {
-            verify(shardDetector, times(1)).listShards();
+            verify(shardDetector, times(1)).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, times(1)).listLeases();
             verify(dynamoDBLeaseRefresher, never()).createLeaseIfNotExists(any(Lease.class));
             verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));

             // Second call: Leases not present, leases will be created.
-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    position,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());

             final Set<Lease> createLeases = new HashSet<>(leaseCreateCaptor.getAllValues());
             final Set<Lease> expectedCreateLeases = getExpectedLeasesForGraphA(shards, sequenceNumber, position);
             assertThat(createLeases, equalTo(expectedCreateLeases));

-            verify(shardDetector, times(2)).listShards();
+            verify(shardDetector, times(2)).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, times(2)).listLeases();
             verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class));
             verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));

             // Final call: Leases present, belongs to TestOwner, shardId-0 is at ShardEnd should be cleaned up.
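
Note: one detail worth calling out in the shape above is that, with @Test(expected = ...), the verifications sit in a finally block so they still run when the stubbed DependencyException propagates out of the try. A stripped-down sketch of that arrangement, with a hypothetical Refresher interface rather than the KCL one:

    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import org.junit.Test;

    public class ExpectedExceptionStillVerifiesTest {
        interface Refresher {
            void refresh() throws Exception;
        }

        @Test(expected = IllegalStateException.class)
        public void verifiesInteractionsEvenWhenCallThrows() throws Exception {
            Refresher refresher = mock(Refresher.class);
            doThrow(new IllegalStateException("boom")).when(refresher).refresh();
            try {
                refresher.refresh(); // throws; JUnit then checks the exception type
            } finally {
                // Runs before the exception reaches JUnit, so the interaction is still checked.
                verify(refresher, times(1)).refresh();
            }
        }
    }
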
-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    position,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());

-            final Set<String> expectedShardIds = new HashSet<>(
-                    Collections.singletonList(String.format(shardIdPrefix, 0)));
-            final Set<ExtendedSequenceNumber> expectedSequenceNumbers = new HashSet<>(
-                    Collections.singletonList(ExtendedSequenceNumber.SHARD_END));
-
-            verify(shardDetector, times(3)).listShards();
+            verify(shardDetector, times(3)).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class));
             verify(dynamoDBLeaseRefresher, times(3)).listLeases();
             verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
         }
     }

+    @Test
+    public void testDeletedStreamListProviderUpdateOnResourceNotFound()
+            throws ProvisionedThroughputException, InvalidStateException, DependencyException, InterruptedException {
+        DeletedStreamListProvider dummyDeletedStreamListProvider = new DeletedStreamListProvider();
+        hierarchicalShardSyncer =
+                new HierarchicalShardSyncer(MULTISTREAM_MODE_ON, STREAM_IDENTIFIER, dummyDeletedStreamListProvider);
+        when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(false);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenThrow(ResourceNotFoundException.builder().build());
+        boolean response = hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_TRIM_HORIZON,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        Set<StreamIdentifier> deletedStreamSet = dummyDeletedStreamListProvider.purgeAllDeletedStream();
+
+        assertFalse(response);
+        assertThat(deletedStreamSet.size(), equalTo(1));
+        assertThat(deletedStreamSet.iterator().next().toString(), equalTo(STREAM_IDENTIFIER));
+
+        verify(shardDetector).listShardsWithoutConsumingResourceNotFoundException();
+        verify(shardDetector, never()).listShards();
+    }
+
     @Test(expected = DependencyException.class)
     public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithCreateLeaseExceptions()
             throws Exception {
-        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(ExtendedSequenceNumber.TRIM_HORIZON,
-                INITIAL_POSITION_TRIM_HORIZON);
+        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(
+                ExtendedSequenceNumber.TRIM_HORIZON, INITIAL_POSITION_TRIM_HORIZON);
     }

     @Test(expected = DependencyException.class)
     public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithCreateLeaseExceptions()
             throws Exception {
-        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(ExtendedSequenceNumber.AT_TIMESTAMP,
-                INITIAL_POSITION_AT_TIMESTAMP);
+        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(
+                ExtendedSequenceNumber.AT_TIMESTAMP, INITIAL_POSITION_AT_TIMESTAMP);
     }

     private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(
             final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position)
             throws Exception {
         final String shardIdPrefix = "shardId-%d";
-        final List<Shard> shards = constructShardListForGraphA();
+        final List<Shard> shards = SHARD_GRAPH_A;
         final List<Lease> leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER);

         // Marking shardId-0 as ShardEnd.
-        leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst()
+        leases.stream()
+                .filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey()))
+                .findFirst()
                 .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END));

         // Marking child of shardId-0 to be processed and not at TRIM_HORIZON.
-        leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst()
+        leases.stream()
+                .filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey()))
+                .findFirst()
                 .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1")));

         final ArgumentCaptor<Lease> leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class);

-        when(shardDetector.listShards()).thenReturn(shards);
-        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList())
-                .thenReturn(Collections.emptyList()).thenReturn(leases);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);
+        when(dynamoDBLeaseRefresher.listLeases())
+                .thenReturn(Collections.emptyList())
+                .thenReturn(Collections.emptyList())
+                .thenReturn(leases);
         when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture()))
-                .thenThrow(new DependencyException(new Throwable("Throw for CreateLease"))).thenReturn(true);
+                .thenThrow(new DependencyException(new Throwable("Throw for CreateLease")))
+                .thenReturn(true);

         try {
             // Initial call: No leases present, create leases. Create lease Fails
-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    position,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
         } finally {
-            verify(shardDetector, times(1)).listShards();
+            verify(shardDetector, times(1)).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, times(1)).listLeases();
             verify(dynamoDBLeaseRefresher, times(1)).createLeaseIfNotExists(any(Lease.class));
             verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));

-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    position,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());

             final Set<Lease> createLeases = new HashSet<>(leaseCreateCaptor.getAllValues());
             final Set<Lease> expectedCreateLeases = getExpectedLeasesForGraphA(shards, sequenceNumber, position);
             assertThat(createLeases, equalTo(expectedCreateLeases));

-            verify(shardDetector, times(2)).listShards();
+            verify(shardDetector, times(2)).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, times(2)).listLeases();
             verify(dynamoDBLeaseRefresher, times(1 + expectedCreateLeases.size()))
                     .createLeaseIfNotExists(any(Lease.class));
             verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));

             // Final call: Leases are present, shardId-0 is at ShardEnd needs to be cleaned up.
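
Note: the assertions in these tests depend on ArgumentCaptor accumulating one value per captured invocation, so getAllValues() reflects every createLeaseIfNotExists(...) call across the repeated sync passes. A compact illustration, again with a made-up LeaseCreator interface rather than the KCL refresher:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.List;
    import org.mockito.ArgumentCaptor;

    interface LeaseCreator {
        boolean createIfNotExists(String leaseKey);
    }

    class CaptorDemo {
        public static void main(String[] args) {
            LeaseCreator creator = mock(LeaseCreator.class);
            ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
            // capture() acts as an any() matcher that also records each argument.
            when(creator.createIfNotExists(captor.capture())).thenReturn(true);

            creator.createIfNotExists("shardId-0");
            creator.createIfNotExists("shardId-1");

            List<String> all = captor.getAllValues(); // one entry per call, in order
            System.out.println(all); // [shardId-0, shardId-1]
        }
    }
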
-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
-                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    position,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());

-            verify(shardDetector, times(3)).listShards();
+            verify(shardDetector, times(3)).listShardsWithoutConsumingResourceNotFoundException();
             verify(dynamoDBLeaseRefresher, times(1 + expectedCreateLeases.size()))
                     .createLeaseIfNotExists(any(Lease.class));
             verify(dynamoDBLeaseRefresher, times(3)).listLeases();
@@ -1002,102 +1096,120 @@ public class HierarchicalShardSyncerTest {
         }
     }

-    private Lease createLeaseFromShard(final Shard shard, final ExtendedSequenceNumber checkpoint,
-            final String leaseOwner) {
-        return createLeasesFromShards(Collections.singletonList(shard), checkpoint, leaseOwner).get(0);
+    private List<Lease> createLeasesFromShards(
+            final List<Shard> shards, final ExtendedSequenceNumber checkpoint, final String leaseOwner) {
+        return shards.stream()
+                .map(shard -> {
+                    final Set<String> parentShardIds = new HashSet<>();
+                    if (StringUtils.isNotEmpty(shard.parentShardId())) {
+                        parentShardIds.add(shard.parentShardId());
+                    }
+                    if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
+                        parentShardIds.add(shard.adjacentParentShardId());
+                    }
+                    return new Lease(
+                            shard.shardId(),
+                            leaseOwner,
+                            0L,
+                            UUID.randomUUID(),
+                            0L,
+                            checkpoint,
+                            null,
+                            0L,
+                            parentShardIds,
+                            new HashSet<>(),
+                            null,
+                            HashKeyRangeForLease.fromHashKeyRange(shard.hashKeyRange()));
+                })
+                .collect(Collectors.toList());
     }

-    private MultiStreamLease createMultiStreamLeaseFromShard(final Shard shard, final ExtendedSequenceNumber checkpoint,
-            final String leaseOwner) {
-        return createMultiStreamLeasesFromShards(Collections.singletonList(shard), checkpoint, leaseOwner).get(0);
-    }
-
-    private List<Lease> createLeasesFromShards(final List<Shard> shards, final ExtendedSequenceNumber checkpoint,
-            final String leaseOwner) {
-        return shards.stream().map(shard -> {
-            final Set<String> parentShardIds = new HashSet<>();
-            if (StringUtils.isNotEmpty(shard.parentShardId())) {
-                parentShardIds.add(shard.parentShardId());
-            }
-            if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
-                parentShardIds.add(shard.adjacentParentShardId());
-            }
-            return new Lease(shard.shardId(), leaseOwner, 0L, UUID.randomUUID(), 0L, checkpoint, null, 0L,
-                    parentShardIds, new HashSet<>(), null, HashKeyRangeForLease.fromHashKeyRange(shard.hashKeyRange()));
-        }).collect(Collectors.toList());
-    }
-
-    private List<MultiStreamLease> createMultiStreamLeasesFromShards(final List<Shard> shards, final ExtendedSequenceNumber checkpoint,
-            final String leaseOwner) {
-        return shards.stream().map(shard -> {
-            final Set<String> parentShardIds = new HashSet<>();
-            if (StringUtils.isNotEmpty(shard.parentShardId())) {
-                parentShardIds.add(shard.parentShardId());
-            }
-            if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
-                parentShardIds.add(shard.adjacentParentShardId());
-            }
-            final MultiStreamLease msLease = new MultiStreamLease();
-            msLease.shardId(shard.shardId());
-            msLease.leaseOwner(leaseOwner);
-            msLease.leaseCounter(0L);
-            msLease.concurrencyToken(UUID.randomUUID());
-            msLease.lastCounterIncrementNanos(0L);
-            msLease.checkpoint(checkpoint);
-            msLease.parentShardIds(parentShardIds);
-            msLease.streamIdentifier(STREAM_IDENTIFIER);
-            return msLease;
-        }).collect(Collectors.toList());
+    private List<MultiStreamLease> createMultiStreamLeasesFromShards(
+            final List<Shard> shards, final ExtendedSequenceNumber checkpoint, final String leaseOwner) {
+        return shards.stream()
+                .map(shard -> {
+                    final Set<String> parentShardIds = new HashSet<>();
+                    if (StringUtils.isNotEmpty(shard.parentShardId())) {
+                        parentShardIds.add(shard.parentShardId());
+                    }
+                    if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
+                        parentShardIds.add(shard.adjacentParentShardId());
+                    }
+                    final MultiStreamLease msLease = new MultiStreamLease();
+                    msLease.shardId(shard.shardId());
+                    msLease.leaseOwner(leaseOwner);
+                    msLease.leaseCounter(0L);
+                    msLease.concurrencyToken(UUID.randomUUID());
+                    msLease.lastCounterIncrementNanos(0L);
+                    msLease.checkpoint(checkpoint);
+                    msLease.parentShardIds(parentShardIds);
+                    msLease.streamIdentifier(STREAM_IDENTIFIER);
+                    return msLease;
+                })
+                .collect(Collectors.toList());
     }

     private void testCheckAndCreateLeasesForShardsIfMissing(InitialPositionInStreamExtended initialPosition)
             throws Exception {
         final String shardId0 = "shardId-0";
         final String shardId1 = "shardId-1";
-        final HashKeyRange range1 = ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, BigInteger.ONE.toString());
-        final HashKeyRange range2 = ShardObjectHelper.newHashKeyRange(new BigInteger("2").toString(), ShardObjectHelper.MAX_HASH_KEY);
+        final HashKeyRange range1 =
+                ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, BigInteger.ONE.toString());
+        final HashKeyRange range2 =
+                ShardObjectHelper.newHashKeyRange(new BigInteger("2").toString(), ShardObjectHelper.MAX_HASH_KEY);
         final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("11", null);
-        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange, range1),
+        final List<Shard> shards = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange, range1),
                 ShardObjectHelper.newShard(shardId1, null, null, sequenceRange, range2));
         final Set<String> expectedLeaseKeys = new HashSet<>(Arrays.asList(shardId0, shardId1));

         testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeys);
     }

-    private void testCheckAndCreateLeaseForShardsIfMissing(final List<Shard> shards,
-            final InitialPositionInStreamExtended initialPosition,
-            final Set<String> expectedLeaseKeys) throws Exception {
+    private void testCheckAndCreateLeaseForShardsIfMissing(
+            final List<Shard> shards,
+            final InitialPositionInStreamExtended initialPosition,
+            final Set<String> expectedLeaseKeys)
+            throws Exception {
        testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeys, Collections.emptyList());
     }

-    private void testCheckAndCreateLeaseForShardsIfMissing(final List<Shard> shards,
-            final InitialPositionInStreamExtended initialPosition,
-            final Set<String> expectedLeaseKeys,
-            final List<Lease> existingLeases) throws Exception {
-
+    private void testCheckAndCreateLeaseForShardsIfMissing(
+            final List<Shard> shards,
+            final InitialPositionInStreamExtended initialPosition,
+            final Set<String> expectedLeaseKeys,
+            final List<Lease> existingLeases)
+            throws Exception {
         final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
-        when(shardDetector.listShards()).thenReturn(shards);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shards);
         when(shardDetector.listShardsWithFilter(any())).thenReturn(getFilteredShards(shards, initialPosition));
         when(dynamoDBLeaseRefresher.listLeases()).thenReturn(existingLeases);
         when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(existingLeases.isEmpty());
-        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
+        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
+                .thenReturn(true);

-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, initialPosition,
-                        SCOPE, false, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                initialPosition,
+                SCOPE,
+                false,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

         final List<Lease> leases = leaseCaptor.getAllValues();
         final Set<String> leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
-        final Set<ExtendedSequenceNumber> leaseSequenceNumbers = leases.stream().map(Lease::checkpoint)
-                .collect(Collectors.toSet());
+        final Set<ExtendedSequenceNumber> leaseSequenceNumbers =
+                leases.stream().map(Lease::checkpoint).collect(Collectors.toSet());

-        final Set<ExtendedSequenceNumber> expectedSequenceNumbers = new HashSet<>(Collections
-                .singletonList(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().name())));
+        final Set<ExtendedSequenceNumber> expectedSequenceNumbers =
+                new HashSet<>(Collections.singletonList(new ExtendedSequenceNumber(
+                        initialPosition.getInitialPositionInStream().name())));

-        assertThat(leases.size(), equalTo(expectedLeaseKeys.size()));
-        assertThat(leaseKeys, equalTo(expectedLeaseKeys));
-        assertThat(leaseSequenceNumbers, equalTo(expectedSequenceNumbers));
+        assertEquals(expectedLeaseKeys.size(), leases.size());
+        assertEquals(expectedLeaseKeys, leaseKeys);
+        assertEquals(expectedSequenceNumbers, leaseSequenceNumbers);

         verify(dynamoDBLeaseRefresher, times(expectedLeaseKeys.size())).createLeaseIfNotExists(any(Lease.class));
         verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
@@ -1108,26 +1220,30 @@ public class HierarchicalShardSyncerTest {
         final String shardId0 = "shardId-0";
         final String shardId1 = "shardId-1";
         final List<Lease> currentLeases = new ArrayList<>();
-        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
+        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
+                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
         final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);

-        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
+        final List<Shard> shards = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                 ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
-        final Set<InitialPositionInStreamExtended> initialPositions = new HashSet<>(
-                Arrays.asList(INITIAL_POSITION_LATEST, INITIAL_POSITION_TRIM_HORIZON));
+        final Set<InitialPositionInStreamExtended> initialPositions =
+                new HashSet<>(Arrays.asList(INITIAL_POSITION_LATEST, INITIAL_POSITION_TRIM_HORIZON));

         final Set<String> expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1));

         for (InitialPositionInStreamExtended initialPosition : initialPositions) {
-            final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, currentLeases,
-                    initialPosition);
-            assertThat(newLeases.size(), equalTo(2));
+            final List<Lease> newLeases =
+                    determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, currentLeases, initialPosition);
+            assertEquals(2, newLeases.size());

             for (Lease lease : newLeases) {
-                assertThat(expectedLeaseShardIds.contains(lease.leaseKey()), equalTo(true));
-                assertThat(lease.checkpoint(),
-                        equalTo(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().toString())));
+                assertTrue(expectedLeaseShardIds.contains(lease.leaseKey()));
+                assertThat(
+                        lease.checkpoint(),
+                        equalTo(new ExtendedSequenceNumber(
+                                initialPosition.getInitialPositionInStream().toString())));
             }
         }
     }
@@ -1140,26 +1256,30 @@ public class HierarchicalShardSyncerTest {
         final String lastShardId = "shardId-1";

         final List<Shard> shardsWithoutLeases = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-0", null, null,
-                        ShardObjectHelper.newSequenceNumberRange("303", "404")),
-                ShardObjectHelper.newShard(lastShardId, null, null,
-                        ShardObjectHelper.newSequenceNumberRange("405", null)));
+                ShardObjectHelper.newShard(
+                        "shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("303", "404")),
+                ShardObjectHelper.newShard(
+                        lastShardId, null, null, ShardObjectHelper.newSequenceNumberRange("405", null)));

-        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard("shardId-2", null,
-                null, ShardObjectHelper.newSequenceNumberRange("202", "302")));
+        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(
+                "shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("202", "302")));

-        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases).flatMap(x -> x.stream()).collect(Collectors.toList());
-        final List<Lease> currentLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
-        final Set<String> inconsistentShardIds = Collections.emptySet();
+        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases)
+                .flatMap(x -> x.stream())
+                .collect(Collectors.toList());
+        final List<Lease> currentLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");

         Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
-        Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+        Map<String, Set<String>> shardIdToChildShardIdsMap =
+                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
         final HierarchicalShardSyncer.LeaseSynchronizer leaseSynchronizer =
-                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
+                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
+                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);

-        final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases,
-                INITIAL_POSITION_LATEST);
+        final List<Lease> newLeases =
+                determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, INITIAL_POSITION_LATEST);
         assertThat(newLeases.size(), equalTo(1));
         assertThat(newLeases.get(0).leaseKey(), equalTo(lastShardId));
@@ -1168,6 +1288,7 @@ public class HierarchicalShardSyncerTest {
     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1176,20 +1297,22 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (3, 4, 5)
          * Initial position: LATEST
          * Expected leases: (2, 6)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_PartialHashRange1() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-3", "shardId-4", "shardId-5");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1198,20 +1321,21 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 5, 7)
          * Initial position: LATEST
          * Expected leases: (6)
    -     *
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_PartialHashRange2() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-7");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1220,22 +1344,24 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (2, 6)
          * Initial position: LATEST
          * Expected leases: (3, 4, 9, 10)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_PartialHashRange3() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-2", "shardId-6");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1244,18 +1370,20 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 9, 10)
          * Initial position: LATEST
          * Expected leases: (8)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_PartialHashRange4() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-9", "shardId-10");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
+     * <pre>
          * Helper method to construct a shard list for graph C. Graph C is defined below. Shard structure (y-axis is
          * epochs):     0      1  2  3  - shards till
          *            /   \    |  \ /
    @@ -1266,22 +1394,24 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (9, 10)
          * Initial position: LATEST
          * Expected leases: (1, 6, 7, 8)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestC_PartialHashRange5() {
-        final List<Shard> shards = constructShardListForGraphC();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-9", "shardId-10");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_C, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1290,18 +1420,18 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 5, 6, 7)
          * Initial position: LATEST
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_CompleteHashRange() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-6", "shardId-7");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedNoNewLeases);
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1310,19 +1440,19 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 2, 3, 4, 5, 6, 7)
          * Initial position: LATEST
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_CompleteHashRangeWithoutGC() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
-                "shardId-4", "shardId-5", "shardId-6", "shardId-7");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases = Arrays.asList(
+                "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5", "shardId-6", "shardId-7");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1331,22 +1461,23 @@ public class HierarchicalShardSyncerTest {
          * Current leases: empty set
          * Initial position: LATEST
          * Expected leases: (4, 8, 9, 10)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_EmptyLeaseTable() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Collections.emptyList();
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1355,17 +1486,17 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 4, 7, 9, 10)
          * Initial position: LATEST
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestA_CompleteHashRangeAcrossDifferentEpochs() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7",
-                "shardId-9", "shardId-10");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases =
+                Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7", "shardId-9", "shardId-10");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1376,17 +1507,19 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (6)
          * Initial position: LATEST
          * Expected leases: (7)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestB_PartialHashRange() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-6");
+        final List<String> shardIdsOfCurrentLeases = Collections.singletonList("shardId-6");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1397,16 +1530,16 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (5)
          * Initial position: LATEST
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestB_CompleteHashRange() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-5");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases = Collections.singletonList("shardId-5");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1417,17 +1550,17 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 2, 3, 4, 5)
          * Initial position: LATEST
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestB_CompleteHashRangeWithoutGC() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
-                "shardId-4", "shardId-5");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases =
+                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1438,20 +1571,21 @@ public class HierarchicalShardSyncerTest {
          * Current leases: empty set
          * Initial position: LATEST
          * Expected leases: (9, 10)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeLatestB_EmptyLeaseTable() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Collections.emptyList();
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.LATEST);
         expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.LATEST);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1460,21 +1594,23 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (3, 4, 5)
          * Initial position: TRIM_HORIZON
          * Expected leases: (0, 1, 2)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_PartialHashRange1() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-3", "shardId-4", "shardId-5");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1483,20 +1619,22 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 5, 7)
          * Initial position: TRIM_HORIZON
          * Expected leases: (0, 1)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_PartialHashRange2() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-7");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1505,21 +1643,23 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (2, 6)
          * Initial position: TRIM_HORIZON
          * Expected leases: (3, 4, 5)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_PartialHashRange3() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-2", "shardId-6");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.TRIM_HORIZON);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1528,22 +1668,24 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 9, 10)
          * Initial position: TRIM_HORIZON
          * Expected leases: (0, 1, 2, 3)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_PartialHashRange4() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-9", "shardId-10");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.TRIM_HORIZON);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1552,18 +1694,18 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 5, 6, 7)
          * Initial position: TRIM_HORIZON
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_CompleteHashRange() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-6", "shardId-7");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedNoNewLeases);
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1572,19 +1714,19 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 2, 3, 4, 5, 6, 7)
          * Initial position: TRIM_HORIZON
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_CompleteHashRangeWithoutGC() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
-                "shardId-4", "shardId-5", "shardId-6", "shardId-7");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases = Arrays.asList(
+                "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5", "shardId-6", "shardId-7");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1593,11 +1735,10 @@ public class HierarchicalShardSyncerTest {
          * Current leases: empty set
          * Initial position: TRIM_HORIZON
          * Expected leases: (0, 1, 2, 3, 4, 5)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_EmptyLeaseTable() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Collections.emptyList();
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
@@ -1605,12 +1746,14 @@ public class HierarchicalShardSyncerTest {
         expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.TRIM_HORIZON);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1619,17 +1762,17 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 4, 7, 9, 10)
          * Initial position: TRIM_HORIZON
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonA_CompleteHashRangeAcrossDifferentEpochs() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7",
-                "shardId-9", "shardId-10");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases =
+                Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7", "shardId-9", "shardId-10");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
     }

     /*
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1640,18 +1783,21 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (6)
          * Initial position: TRIM_HORIZON
          * Expected leases: (7)
+     * </pre>
      */
-// TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases
-// @Test
-// public void testDetermineNewLeasesToCreateSplitMergeHorizonB_PartialHashRange() {
-//     final List<Shard> shards = constructShardListForGraphB();
-//     final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-6");
-//     final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
-//     expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON);
-//     assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
-// }
+    // TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases
+    // @Test
+    // public void testDetermineNewLeasesToCreateSplitMergeHorizonB_PartialHashRange() {
+    //     final List<Shard> shards = constructShardListForGraphB();
+    //     final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-6");
+    //     final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
+    //     expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON);
+    //     assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON,
+    //             expectedShardIdCheckpointMap);
+    // }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1662,16 +1808,16 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (5)
          * Initial position: TRIM_HORIZON
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonB_CompleteHashRange() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-5");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases = Collections.singletonList("shardId-5");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1682,17 +1828,17 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 2, 3, 4, 5)
          * Initial position: TRIM_HORIZON
          * Expected leases: empty set
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonB_CompleteHashRangeWithoutGC() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
-                "shardId-4", "shardId-5");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases =
+                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1703,20 +1849,21 @@ public class HierarchicalShardSyncerTest {
          * Current leases: empty set
          * Initial position: TRIM_HORIZON
          * Expected leases: (0, 1)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeHorizonB_EmptyLeaseTable() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Collections.emptyList();
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1725,21 +1872,23 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (3, 4, 5)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (0, 1, 2)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_PartialHashRange1() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-3", "shardId-4", "shardId-5");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.AT_TIMESTAMP);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1748,20 +1897,22 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 5, 7)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (0, 1)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_PartialHashRange2() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-7");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1770,21 +1921,23 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (2, 6)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (3, 4, 5)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_PartialHashRange3() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-2", "shardId-6");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.AT_TIMESTAMP);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1793,22 +1946,24 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 9, 10)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (0, 1, 2, 3)
+     * </pre>
      */
     @Test
     public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_PartialHashRange4() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-9", "shardId-10");
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.AT_TIMESTAMP);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1817,18 +1972,18 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (4, 5, 6, 7)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: empty set
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_CompleteHashRange() {
-        final List<Shard> shards = constructShardListForGraphA();
         final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-6", "shardId-7");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedNoNewLeases);
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1837,19 +1992,19 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 2, 3, 4, 5, 6, 7)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: empty set
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_CompleteHashRangeWithoutGC() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
-                "shardId-4", "shardId-5", "shardId-6", "shardId-7");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases = Arrays.asList(
+                "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5", "shardId-6", "shardId-7");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
    @@ -1858,11 +2013,10 @@ public class HierarchicalShardSyncerTest {
          * Current leases: empty set
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (0, 1, 2, 3, 4, 5)
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_EmptyLeaseTable() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Collections.emptyList();
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
@@ -1870,12 +2024,14 @@ public class HierarchicalShardSyncerTest {
         expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.AT_TIMESTAMP);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
     }

     /**
      * Test CheckIfDescendantAndAddNewLeasesForAncestors
      * Shard structure (each level depicts a stream segment):
+     * <pre>
          * 0 1 2 3 4   5- shards till epoch 102
          * \ / \ / |   |
          *  6   7  4   5- shards from epoch 103 - 205
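A side note on the block of identical put(...) calls in the hunk above: every expected entry maps to the same AT_TIMESTAMP checkpoint, so a purely optional tightening for a future cleanup (not part of this diff; assumes the usual java.util.stream and java.util.function imports) could be:

    // Hypothetical alternative to the repeated put(...) calls:
    final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = Stream.of(
                    "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5")
            .collect(Collectors.toMap(Function.identity(), id -> ExtendedSequenceNumber.AT_TIMESTAMP));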
    @@ -1884,17 +2040,17 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 4, 7, 9, 10)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: empty set
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_CompleteHashRangeAcrossDifferentEpochs() {
-        final List<Shard> shards = constructShardListForGraphA();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7",
-                "shardId-9", "shardId-10");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases =
+                Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7", "shardId-9", "shardId-10");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
     }

     /*
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1905,18 +2061,21 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (6)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (7)
+     * </pre>
     */
-// TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases
-// @Test
-// public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_PartialHashRange() {
-//        final List<Shard> shards = constructShardListForGraphB();
-//        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-6");
-//        final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
-//        expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.AT_TIMESTAMP);
-//        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
-// }
+    // TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases
+    // @Test
+    // public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_PartialHashRange() {
+    //     final List<Shard> shards = constructShardListForGraphB();
+    //     final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-6");
+    //     final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
+    //     expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.AT_TIMESTAMP);
+    //     assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP,
+    //             expectedShardIdCheckpointMap);
+    // }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
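Before the next hunk: the "complete hash range" in these test names means the leased shards' hash key ranges jointly cover the whole Kinesis keyspace [0, 2^128 - 1]. A minimal sketch of that idea (illustrative only; KCL's real validation lives in the syncer, not in this test helper):

    import java.math.BigInteger;
    import java.util.*;

    public class HashRangeCoverageSketch {
        /** Each element is {startingHashKey, endingHashKey}, both inclusive. */
        static boolean coversFullKeyspace(List<BigInteger[]> ranges) {
            final BigInteger maxHashKey = BigInteger.valueOf(2).pow(128).subtract(BigInteger.ONE);
            ranges.sort(Comparator.comparing(r -> r[0]));
            BigInteger next = BigInteger.ZERO; // lowest hash key not yet covered
            for (BigInteger[] r : ranges) {
                if (r[0].compareTo(next) > 0) {
                    return false; // found a gap below r[0]
                }
                next = next.max(r[1].add(BigInteger.ONE));
            }
            return next.compareTo(maxHashKey) > 0;
        }

        public static void main(String[] args) {
            BigInteger max = BigInteger.valueOf(2).pow(128).subtract(BigInteger.ONE);
            // Graph B's shardId-5 spans the whole keyspace, so a lease on it alone is "complete"
            // (the CompleteHashRange test below expects no new leases).
            System.out.println(coversFullKeyspace(new ArrayList<>(
                    Collections.singletonList(new BigInteger[] {BigInteger.ZERO, max})))); // true
            // shardId-6 alone covers only [0, 499]; its sibling shardId-7 is missing.
            System.out.println(coversFullKeyspace(new ArrayList<>(
                    Collections.singletonList(new BigInteger[] {BigInteger.ZERO, BigInteger.valueOf(499)})))); // false
        }
    }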
    @@ -1927,16 +2086,16 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (5)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: empty set
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_CompleteHashRange() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-5");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases = Collections.singletonList("shardId-5");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1947,17 +2106,17 @@ public class HierarchicalShardSyncerTest {
          * Current leases: (0, 1, 2, 3, 4, 5)
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: empty set
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_CompleteHashRangeWithoutGC() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
-                "shardId-4", "shardId-5");
-        final Map<String, ExtendedSequenceNumber> expectedNoNewLeases = Collections.emptyMap();
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedNoNewLeases);
+        final List<String> shardIdsOfCurrentLeases =
+                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5");
+        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
     }

-    /*
+    /**
+     * <pre>
          * Shard structure (x-axis is epochs):
          * 0  3   6   9
          * \ / \ / \ /
    @@ -1968,44 +2127,57 @@ public class HierarchicalShardSyncerTest {
          * Current leases: empty set
          * Initial position: AT_TIMESTAMP(1000)
          * Expected leases: (0, 1)
+     * </pre>
     */
    @Test
    public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_EmptyLeaseTable() {
-        final List<Shard> shards = constructShardListForGraphB();
-        final List<String> shardIdsOfCurrentLeases = Collections.emptyList();
         final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
         expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
         expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
-        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
+        assertExpectedLeasesAreCreated(
+                SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
     }

-    private void assertExpectedLeasesAreCreated(List<Shard> shards,
-            List<String> shardIdsOfCurrentLeases,
-            InitialPositionInStreamExtended initialPosition,
-            Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap) {
+    private void assertExpectedLeasesAreCreated(
+            final List<Shard> shards,
+            final List<String> shardIdsOfCurrentLeases,
+            final InitialPositionInStreamExtended initialPosition) {
+        assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, initialPosition, Collections.emptyMap());
+    }
+
+    private void assertExpectedLeasesAreCreated(
+            List<Shard> shards,
+            List<String> shardIdsOfCurrentLeases,
+            InitialPositionInStreamExtended initialPosition,
+            Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap) {
         final List<Lease> currentLeases = shardIdsOfCurrentLeases.stream()
-                .map(shardId -> newLease(shardId)).collect(Collectors.toList());
+                .map(shardId -> newLease(shardId))
+                .collect(Collectors.toList());
         final Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
-        final Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer
-                .constructShardIdToChildShardIdsMap(shardIdToShardMap);
+        final Map<String, Set<String>> shardIdToChildShardIdsMap =
+                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
         final HierarchicalShardSyncer.LeaseSynchronizer nonEmptyLeaseTableSynchronizer =
-                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
+                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
+                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);

-        final List<Lease> newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(nonEmptyLeaseTableSynchronizer,
-                shards, currentLeases, initialPosition);
+        final List<Lease> newLeases =
+                determineNewLeasesToCreate(nonEmptyLeaseTableSynchronizer, shards, currentLeases, initialPosition);
         assertThat(newLeases.size(), equalTo(expectedShardIdCheckpointMap.size()));
         for (Lease lease : newLeases) {
-            assertThat("Unexpected lease: " + lease, expectedShardIdCheckpointMap.containsKey(lease.leaseKey()),
+            assertThat(
+                    "Unexpected lease: " + lease,
+                    expectedShardIdCheckpointMap.containsKey(lease.leaseKey()),
                     equalTo(true));
             assertThat(lease.checkpoint(), equalTo(expectedShardIdCheckpointMap.get(lease.leaseKey())));
         }
     }

-    /*
+    /**
+     * <pre>
          * Helper method to construct a shard list for graph A. Graph A is defined below. Shard structure (y-axis is
          * epochs): 0 1 2 3 4   5- shards till
          *          \ / \ / |   |
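For what it's worth, the net effect of the assertExpectedLeasesAreCreated refactor a few hunks back is that "no new leases expected" no longer needs an explicit empty map at each call site; the two forms below are now equivalent (SHARD_GRAPH_A is the shared fixture the call sites switched to):

    assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
    assertExpectedLeasesAreCreated(
            SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, Collections.emptyMap());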
    @@ -2013,8 +2185,9 @@ public class HierarchicalShardSyncerTest {
          *            \ /   |  /\
          *             8    4 9 10 -
          * shards from epoch 206 (open - no ending sequenceNumber)
+     * </pre>
     */
-    private List<Shard> constructShardListForGraphA() {
+    private static List<Shard> constructShardListForGraphA() {
         final SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("11", "102");
         final SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("11", null);
         final SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("11", "205");
@@ -2022,27 +2195,35 @@ public class HierarchicalShardSyncerTest {
         final SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null);

         return Arrays.asList(
-                ShardObjectHelper.newShard("shardId-0", null, null, range0,
-                        ShardObjectHelper.newHashKeyRange("0", "99")),
-                ShardObjectHelper.newShard("shardId-1", null, null, range0,
-                        ShardObjectHelper.newHashKeyRange("100", "199")),
-                ShardObjectHelper.newShard("shardId-2", null, null, range0,
-                        ShardObjectHelper.newHashKeyRange("200", "299")),
-                ShardObjectHelper.newShard("shardId-3", null, null, range0,
-                        ShardObjectHelper.newHashKeyRange("300", "399")),
-                ShardObjectHelper.newShard("shardId-4", null, null, range1,
-                        ShardObjectHelper.newHashKeyRange("400", "499")),
-                ShardObjectHelper.newShard("shardId-5", null, null, range2,
+                ShardObjectHelper.newShard(
+                        "shardId-0", null, null, range0, ShardObjectHelper.newHashKeyRange("0", "99")),
+                ShardObjectHelper.newShard(
+                        "shardId-1", null, null, range0, ShardObjectHelper.newHashKeyRange("100", "199")),
+                ShardObjectHelper.newShard(
+                        "shardId-2", null, null, range0, ShardObjectHelper.newHashKeyRange("200", "299")),
+                ShardObjectHelper.newShard(
+                        "shardId-3", null, null, range0, ShardObjectHelper.newHashKeyRange("300", "399")),
+                ShardObjectHelper.newShard(
+                        "shardId-4", null, null, range1, ShardObjectHelper.newHashKeyRange("400", "499")),
+                ShardObjectHelper.newShard(
+                        "shardId-5",
+                        null,
+                        null,
+                        range2,
                         ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY)),
-                ShardObjectHelper.newShard("shardId-6", "shardId-0", "shardId-1", range3,
-                        ShardObjectHelper.newHashKeyRange("0", "199")),
-                ShardObjectHelper.newShard("shardId-7", "shardId-2", "shardId-3", range3,
-                        ShardObjectHelper.newHashKeyRange("200", "399")),
-                ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range4,
-                        ShardObjectHelper.newHashKeyRange("0", "399")),
-                ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4,
-                        ShardObjectHelper.newHashKeyRange("500", "799")),
-                ShardObjectHelper.newShard("shardId-10", null, "shardId-5", range4,
+                ShardObjectHelper.newShard(
+                        "shardId-6", "shardId-0", "shardId-1", range3, ShardObjectHelper.newHashKeyRange("0", "199")),
+                ShardObjectHelper.newShard(
+                        "shardId-7", "shardId-2", "shardId-3", range3, ShardObjectHelper.newHashKeyRange("200", "399")),
+                ShardObjectHelper.newShard(
+                        "shardId-8", "shardId-6", "shardId-7", range4, ShardObjectHelper.newHashKeyRange("0", "399")),
+                ShardObjectHelper.newShard(
+                        "shardId-9", "shardId-5", null, range4, ShardObjectHelper.newHashKeyRange("500", "799")),
+                ShardObjectHelper.newShard(
+                        "shardId-10",
+                        null,
+                        "shardId-5",
+                        range4,
                         ShardObjectHelper.newHashKeyRange("800", ShardObjectHelper.MAX_HASH_KEY)));
     }
@@ -2057,25 +2238,32 @@ public class HierarchicalShardSyncerTest {
                         .collect(Collectors.toList());
             case TRIM_HORIZON:
                 String minSeqNum = shards.stream()
-                        .min(Comparator.comparingLong(s -> Long.parseLong(s.sequenceNumberRange().startingSequenceNumber())))
+                        .min(Comparator.comparingLong(
+                                s -> Long.parseLong(s.sequenceNumberRange().startingSequenceNumber())))
                         .map(s -> s.sequenceNumberRange().startingSequenceNumber())
                         .orElseThrow(RuntimeException::new);
                 return shards.stream()
-                        .filter(s -> s.sequenceNumberRange().startingSequenceNumber().equals(minSeqNum))
+                        .filter(s ->
+                                s.sequenceNumberRange().startingSequenceNumber().equals(minSeqNum))
                         .collect(Collectors.toList());
             case AT_TIMESTAMP:
                 return shards.stream()
-                        .filter(s -> new Date(Long.parseLong(s.sequenceNumberRange().startingSequenceNumber()))
-                                .compareTo(initialPosition.getTimestamp()) <= 0)
-                        .filter(s -> s.sequenceNumberRange().endingSequenceNumber() == null ||
-                                new Date(Long.parseLong(s.sequenceNumberRange().endingSequenceNumber()))
-                                        .compareTo(initialPosition.getTimestamp()) > 0)
+                        .filter(s ->
+                                new Date(Long.parseLong(s.sequenceNumberRange().startingSequenceNumber()))
+                                                .compareTo(initialPosition.getTimestamp())
+                                        <= 0)
+                        .filter(s -> s.sequenceNumberRange().endingSequenceNumber() == null
+                                || new Date(Long.parseLong(
+                                                s.sequenceNumberRange().endingSequenceNumber()))
+                                                .compareTo(initialPosition.getTimestamp())
+                                        > 0)
                         .collect(Collectors.toList());
         }
         throw new RuntimeException("Unsupported initial position " + initialPosition);
     }

-    /*
+    /**
+     * <pre>
          * Helper method to get expected shards for Graph A based on initial position in stream. Shard structure (y-axis is
          * epochs): 0 1 2 3 4   5- shards till
          *          \ / \ / |   |
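The reflowed AT_TIMESTAMP branch in the hunk above is dense; the predicate it encodes is simply "the shard was open at the timestamp". A standalone restatement (hypothetical helper, not in the diff, using the same SDK accessors; these tests encode epoch millis as sequence numbers):

    // Expected at AT_TIMESTAMP(t): start <= t, and either still open (no end) or end > t.
    static boolean openAt(Shard shard, Date timestamp) {
        final Date start = new Date(Long.parseLong(shard.sequenceNumberRange().startingSequenceNumber()));
        final String end = shard.sequenceNumberRange().endingSequenceNumber();
        return !start.after(timestamp)
                && (end == null || new Date(Long.parseLong(end)).after(timestamp));
    }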
    @@ -2083,10 +2271,12 @@ public class HierarchicalShardSyncerTest {
          *            \ /   |  /\
          *             8    4 9 10 -
          * shards from epoch 206 (open - no ending sequenceNumber)
+     * </pre>
     */
-    private Set<Lease> getExpectedLeasesForGraphA(List<Shard> shards,
-            ExtendedSequenceNumber sequenceNumber,
-            InitialPositionInStreamExtended initialPosition) {
+    private Set<Lease> getExpectedLeasesForGraphA(
+            List<Shard> shards,
+            ExtendedSequenceNumber sequenceNumber,
+            InitialPositionInStreamExtended initialPosition) {
         final List<Shard> filteredShards;
         if (initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.AT_TIMESTAMP)) {
             // Lease creation for AT_TIMESTAMP should work the same as for TRIM_HORIZON - ignore shard filters
@@ -2097,16 +2287,18 @@ public class HierarchicalShardSyncerTest {
         return new HashSet<>(createLeasesFromShards(filteredShards, sequenceNumber, null));
     }

-//    /*
-//     * Helper method to construct a shard list for graph B. Graph B is defined below.
-//     * Shard structure (x-axis is epochs):
-//     * 0  3   6   9
-//     * \ / \ / \ /
-//     *  2   5   8
-//     * / \ / \ / \
-//     * 1  4   7  10
-//     */
-    private List<Shard> constructShardListForGraphB() {
+    /**
+     * Helper method to construct a shard list for graph B. Graph B is defined below.
+     * Shard structure (x-axis is epochs):
+     * <pre>
    +     * 0  3   6   9
    +     * \ / \ / \ /
    +     *  2   5   8
    +     * / \ / \ / \
    +     * 1  4   7  10
+     * </pre>
+     */
+    private static List<Shard> constructShardListForGraphB() {
         final SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("1000", "1049");
         final SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("1050", "1099");
         final SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("1100", "1149");
@@ -2119,7 +2311,8 @@ public class HierarchicalShardSyncerTest {
         final HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY);
         final HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("0", ShardObjectHelper.MAX_HASH_KEY);

-        return Arrays.asList(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0),
+        return Arrays.asList(
+                ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0),
                 ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1),
                 ShardObjectHelper.newShard("shardId-2", "shardId-0", "shardId-1", range1, hashRange2),
                 ShardObjectHelper.newShard("shardId-3", "shardId-2", null, range2, hashRange0),
@@ -2133,6 +2326,7 @@ public class HierarchicalShardSyncerTest {
     }

     /**
+     * <pre>
          * Helper method to construct a shard list for graph C. Graph C is defined below. Shard structure (y-axis is
          * epochs):     0      1  2  3  - shards till
          *            /   \    |  \ /
    @@ -2140,8 +2334,9 @@ public class HierarchicalShardSyncerTest {
          *          / \   / \  |   |
          *         7   8 9  10 1   6
          * shards from epoch 206 (open - no ending sequenceNumber)
+     * </pre>
     */
-    private List<Shard> constructShardListForGraphC() {
+    private static List<Shard> constructShardListForGraphC() {
         final SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("11", "102");
         final SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("11", null);
         final SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("103", null);
@@ -2149,28 +2344,36 @@ public class HierarchicalShardSyncerTest {
         final SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null);

         return Arrays.asList(
-                ShardObjectHelper.newShard("shardId-0", null, null, range0,
-                        ShardObjectHelper.newHashKeyRange("0", "399")),
-                ShardObjectHelper.newShard("shardId-1", null, null, range1,
-                        ShardObjectHelper.newHashKeyRange("400", "499")),
-                ShardObjectHelper.newShard("shardId-2", null, null, range0,
-                        ShardObjectHelper.newHashKeyRange("500", "599")),
-                ShardObjectHelper.newShard("shardId-3", null, null, range0,
+                ShardObjectHelper.newShard(
+                        "shardId-0", null, null, range0, ShardObjectHelper.newHashKeyRange("0", "399")),
+                ShardObjectHelper.newShard(
+                        "shardId-1", null, null, range1, ShardObjectHelper.newHashKeyRange("400", "499")),
+                ShardObjectHelper.newShard(
+                        "shardId-2", null, null, range0, ShardObjectHelper.newHashKeyRange("500", "599")),
+                ShardObjectHelper.newShard(
+                        "shardId-3",
+                        null,
+                        null,
+                        range0,
                         ShardObjectHelper.newHashKeyRange("600", ShardObjectHelper.MAX_HASH_KEY)),
-                ShardObjectHelper.newShard("shardId-4", "shardId-0", null, range3,
-                        ShardObjectHelper.newHashKeyRange("0", "199")),
-                ShardObjectHelper.newShard("shardId-5", "shardId-0", null, range3,
-                        ShardObjectHelper.newHashKeyRange("200", "399")),
-                ShardObjectHelper.newShard("shardId-6", "shardId-2", "shardId-3", range2,
+                ShardObjectHelper.newShard(
+                        "shardId-4", "shardId-0", null, range3, ShardObjectHelper.newHashKeyRange("0", "199")),
+                ShardObjectHelper.newShard(
+                        "shardId-5", "shardId-0", null, range3, ShardObjectHelper.newHashKeyRange("200", "399")),
+                ShardObjectHelper.newShard(
+                        "shardId-6",
+                        "shardId-2",
+                        "shardId-3",
+                        range2,
                         ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY)),
-                ShardObjectHelper.newShard("shardId-7", "shardId-4", null, range4,
-                        ShardObjectHelper.newHashKeyRange("0", "99")),
-                ShardObjectHelper.newShard("shardId-8", "shardId-4", null, range4,
-                        ShardObjectHelper.newHashKeyRange("100", "199")),
-                ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4,
-                        ShardObjectHelper.newHashKeyRange("200", "299")),
-                ShardObjectHelper.newShard("shardId-10", "shardId-5", null, range4,
-                        ShardObjectHelper.newHashKeyRange("300", "399")));
+                ShardObjectHelper.newShard(
+                        "shardId-7", "shardId-4", null, range4, ShardObjectHelper.newHashKeyRange("0", "99")),
+                ShardObjectHelper.newShard(
+                        "shardId-8", "shardId-4", null, range4, ShardObjectHelper.newHashKeyRange("100", "199")),
+                ShardObjectHelper.newShard(
+                        "shardId-9", "shardId-5", null, range4, ShardObjectHelper.newHashKeyRange("200", "299")),
+                ShardObjectHelper.newShard(
+                        "shardId-10", "shardId-5", null, range4, ShardObjectHelper.newHashKeyRange("300", "399")));
     }

     /**
@@ -2180,9 +2383,8 @@ public class HierarchicalShardSyncerTest {
     public void testCheckIfDescendantAndAddNewLeasesForAncestorsNullShardId() {
         final MemoizationContext memoizationContext = new MemoizationContext();

-        assertThat(HierarchicalShardSyncer
-                .checkIfDescendantAndAddNewLeasesForAncestors(null, INITIAL_POSITION_LATEST, null, null,
-                        null, memoizationContext), equalTo(false));
+        assertFalse(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(
+                null, INITIAL_POSITION_LATEST, null, null, null, memoizationContext));
     }

     /**
@@ -2193,9 +2395,8 @@ public class HierarchicalShardSyncerTest {
         final String shardId = "shardId-trimmed";
         final MemoizationContext memoizationContext = new MemoizationContext();

-        assertThat(HierarchicalShardSyncer
-                .checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, null,
-                        new HashMap<>(), null, memoizationContext), equalTo(false));
+        assertFalse(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(
+                shardId, INITIAL_POSITION_LATEST, null, new HashMap<>(), null, memoizationContext));
     }

     /**
@@ -2210,10 +2411,13 @@ public class HierarchicalShardSyncerTest {
         final Map<String, Shard> kinesisShards = new HashMap<>();
         kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, null, null, null));

-        assertThat(
-                HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST,
-                        shardIdsOfCurrentLeases, kinesisShards, newLeaseMap, memoizationContext), equalTo(true));
-        assertThat(newLeaseMap.isEmpty(), equalTo(true));
+        assertTrue(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(
+                shardId,
+                INITIAL_POSITION_LATEST,
+                shardIdsOfCurrentLeases,
+                kinesisShards,
+                newLeaseMap,
+                memoizationContext));
     }

     /**
@@ -2233,53 +2437,67 @@ public class HierarchicalShardSyncerTest {
         kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
         kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null));

-        assertThat(
-                HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST,
-                        shardIdsOfCurrentLeases, kinesisShards, newLeaseMap, memoizationContext), equalTo(false));
-        assertThat(newLeaseMap.isEmpty(), equalTo(true));
+        assertFalse(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(
+                shardId,
+                INITIAL_POSITION_LATEST,
+                shardIdsOfCurrentLeases,
+                kinesisShards,
+                newLeaseMap,
+                memoizationContext));
     }

     /**
      * Tests that when reading from TIP, we use the AT_LATEST shard filter.
-     * @throws Exception
     */
    @Test
    public void testEmptyLeaseTableBootstrapUsesShardFilterWithAtLatest() throws Exception {
-        ShardFilter shardFilter = ShardFilter.builder().type(ShardFilterType.AT_LATEST).build();
+        ShardFilter shardFilter =
+                ShardFilter.builder().type(ShardFilterType.AT_LATEST).build();
        testEmptyLeaseTableBootstrapUsesListShardsWithFilter(INITIAL_POSITION_LATEST, shardFilter);
    }

    /**
     * Tests that when reading from TRIM, we use the TRIM_HORIZON shard filter.
-     * @throws Exception
     */
    @Test
    public void testEmptyLeaseTableBootstrapUsesShardFilterWithAtTrimHorizon() throws Exception {
-        ShardFilter shardFilter = ShardFilter.builder().type(ShardFilterType.AT_TRIM_HORIZON).build();
+        ShardFilter shardFilter =
+                ShardFilter.builder().type(ShardFilterType.AT_TRIM_HORIZON).build();
        testEmptyLeaseTableBootstrapUsesListShardsWithFilter(INITIAL_POSITION_TRIM_HORIZON, shardFilter);
    }

    /**
     * Tests that when reading from AT_TIMESTAMP, we use the AT_TIMESTAMP shard filter.
-     * @throws Exception
     */
    @Test
    public void testEmptyLeaseTableBootstrapUsesShardFilterWithAtTimestamp() throws Exception {
-        ShardFilter shardFilter = ShardFilter.builder().type(ShardFilterType.AT_TIMESTAMP).timestamp(new Date(1000L).toInstant()).build();
+        ShardFilter shardFilter = ShardFilter.builder()
+                .type(ShardFilterType.AT_TIMESTAMP)
+                .timestamp(new Date(1000L).toInstant())
+                .build();
        testEmptyLeaseTableBootstrapUsesListShardsWithFilter(INITIAL_POSITION_AT_TIMESTAMP, shardFilter);
    }

-    public void testEmptyLeaseTableBootstrapUsesListShardsWithFilter(InitialPositionInStreamExtended initialPosition, ShardFilter shardFilter) throws Exception {
+    public void testEmptyLeaseTableBootstrapUsesListShardsWithFilter(
+            InitialPositionInStreamExtended initialPosition, ShardFilter shardFilter) throws Exception {
         final String shardId0 = "shardId-0";
-        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null,
-                ShardObjectHelper.newSequenceNumberRange("1", null), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, ShardObjectHelper.MAX_HASH_KEY)));
+        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(
+                shardId0,
+                null,
+                null,
+                ShardObjectHelper.newSequenceNumberRange("1", null),
+                ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, ShardObjectHelper.MAX_HASH_KEY)));

         when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true);
         when(shardDetector.listShardsWithFilter(shardFilter)).thenReturn(shards);

-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, initialPosition,
-                        SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                initialPosition,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

         verify(shardDetector, atLeast(1)).listShardsWithFilter(shardFilter);
         verify(shardDetector, never()).listShards();
@@ -2290,42 +2508,61 @@ public class HierarchicalShardSyncerTest {
         final String shardId0 = "shardId-0";
         final String shardId1 = "shardId-1";

-        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, ShardObjectHelper.newSequenceNumberRange("1", "2")));
-        final List<Shard> shardsWithoutLeases = Arrays.asList(ShardObjectHelper.newShard(shardId1, null, null, ShardObjectHelper.newSequenceNumberRange("3", "4")));
+        final List<Shard> shardsWithLeases = Arrays.asList(
+                ShardObjectHelper.newShard(shardId0, null, null, ShardObjectHelper.newSequenceNumberRange("1", "2")));
+        final List<Shard> shardsWithoutLeases = Arrays.asList(
+                ShardObjectHelper.newShard(shardId1, null, null, ShardObjectHelper.newSequenceNumberRange("3", "4")));

-        final List<Lease> currentLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
+        final List<Lease> currentLeases =
+                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");

         when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(false);
-        when(shardDetector.listShards()).thenReturn(shardsWithoutLeases);
+        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
+                .thenReturn(shardsWithoutLeases);
         when(dynamoDBLeaseRefresher.listLeases()).thenReturn(currentLeases);

-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_LATEST,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

-        verify(shardDetector, atLeast(1)).listShards();
+        verify(shardDetector, atLeast(1)).listShardsWithoutConsumingResourceNotFoundException();
     }

     /**
      * Tries to boostrap empty lease table. Verifies that if we fail to get a complete hash range of shards after three
      * retries, we fast fail and throw an exception.
-     * @throws Exception
     */
    @Test(expected = KinesisClientLibIOException.class)
    public void testEmptyLeaseTableThrowsExceptionWhenHashRangeIsStillIncompleteAfterRetries() throws Exception {
        final List<Shard> shardsWithIncompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("0", "1")),
-                ShardObjectHelper.newShard("shardId-1", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("2", "3"))
-        );
+                ShardObjectHelper.newShard(
+                        "shardId-0",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("0", "1")),
+                ShardObjectHelper.newShard(
+                        "shardId-1",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("2", "3")));

        when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true);
        when(shardDetector.listShardsWithFilter(any(ShardFilter.class))).thenReturn(shardsWithIncompleteHashRange);

        try {
-            hierarchicalShardSyncer
-                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                            SCOPE, ignoreUnexpectedChildShards,
-                            dynamoDBLeaseRefresher.isLeaseTableEmpty());
+            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                    shardDetector,
+                    dynamoDBLeaseRefresher,
+                    INITIAL_POSITION_LATEST,
+                    SCOPE,
+                    ignoreUnexpectedChildShards,
+                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
        } finally {
            verify(shardDetector, times(3)).listShardsWithFilter(any(ShardFilter.class)); // Verify retries.
        }
@@ -2334,27 +2571,49 @@ public class HierarchicalShardSyncerTest {
     /**
      * Tries to bootstrap an empty lease table. Verifies that after getting an incomplete hash range of shards two times
      * and a complete hash range the final time, we create the leases.
-     * @throws Exception
     */
    @Test
    public void testEmptyLeaseTablePopulatesLeasesWithCompleteHashRangeAfterTwoRetries() throws Exception {
        final List<Shard> shardsWithIncompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "69")),
-                ShardObjectHelper.newShard("shardId-1", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("71", ShardObjectHelper.MAX_HASH_KEY))
-        );
+                ShardObjectHelper.newShard(
+                        "shardId-0",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "69")),
+                ShardObjectHelper.newShard(
+                        "shardId-1",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("71", ShardObjectHelper.MAX_HASH_KEY)));
        final List<Shard> shardsWithCompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
-                ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))
-        );
+                ShardObjectHelper.newShard(
+                        "shardId-2",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
+                ShardObjectHelper.newShard(
+                        "shardId-3",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY)));

        when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true);
-        when(shardDetector.listShardsWithFilter(any(ShardFilter.class))).thenReturn(shardsWithIncompleteHashRange)
-                .thenReturn(shardsWithIncompleteHashRange).thenReturn(shardsWithCompleteHashRange);
+        when(shardDetector.listShardsWithFilter(any(ShardFilter.class)))
+                .thenReturn(shardsWithIncompleteHashRange)
+                .thenReturn(shardsWithIncompleteHashRange)
+                .thenReturn(shardsWithCompleteHashRange);

-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, ignoreUnexpectedChildShards,
-                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_LATEST,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

        verify(shardDetector, times(3)).listShardsWithFilter(any(ShardFilter.class)); // Verify retries.
        verify(dynamoDBLeaseRefresher, times(2)).createLeaseIfNotExists(any(Lease.class));
@@ -2362,552 +2621,579 @@ public class HierarchicalShardSyncerTest {
     /**
      * Tries to bootstrap an empty lease table. Verifies that leases are created when we have a complete hash range of shards.
-     * @throws Exception
     */
    @Test
    public void testEmptyLeaseTablePopulatesLeasesWithCompleteHashRange() throws Exception {
        final List<Shard> shardsWithCompleteHashRange = Arrays.asList(
-                ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
-                ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))
-        );
+                ShardObjectHelper.newShard(
+                        "shardId-2",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")),
+                ShardObjectHelper.newShard(
+                        "shardId-3",
+                        null,
+                        null,
+                        ShardObjectHelper.newSequenceNumberRange("1", "2"),
+                        ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY)));

        when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true);
        when(shardDetector.listShardsWithFilter(any(ShardFilter.class))).thenReturn(shardsWithCompleteHashRange);

-        hierarchicalShardSyncer
-                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
-                        SCOPE, ignoreUnexpectedChildShards,
-                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
+        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
+                shardDetector,
+                dynamoDBLeaseRefresher,
+                INITIAL_POSITION_LATEST,
+                SCOPE,
+                ignoreUnexpectedChildShards,
+                dynamoDBLeaseRefresher.isLeaseTableEmpty());

        verify(shardDetector, times(1)).listShardsWithFilter(any(ShardFilter.class)); // Verify retries.
        verify(dynamoDBLeaseRefresher, times(2)).createLeaseIfNotExists(any(Lease.class));
    }

-//    /**
-//     * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent.
-//     */
-//    @Test
+    //    /**
+    //     * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent.
+    //     */
+    //    @Test
 //    public void testCheckIfDescendantAndAddNewLeasesForAncestors2P2A1PDescendant() {
-//        Set<String> shardIdsOfCurrentLeases = new HashSet<String>();
-//        Map<String, Lease> newLeaseMap = new HashMap<String, Lease>();
-//        Map<String, Shard> kinesisShards = new HashMap<String, Shard>();
-//
-//        String parentShardId = "shardId-parent";
-//        kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
-//        shardIdsOfCurrentLeases.add(parentShardId);
-//
-//        String adjacentParentShardId = "shardId-adjacentParent";
-//        kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
-//
-//        String shardId = "shardId-9-1";
-//        Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null);
-//        kinesisShards.put(shardId, shard);
-//
-//        Map<String, Boolean> memoizationContext = new HashMap<>();
-//        assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST,
-//                shardIdsOfCurrentLeases,
-//                kinesisShards,
-//                newLeaseMap,
-//                memoizationContext));
-//        assertEquals(1, newLeaseMap.size());
-//        assertTrue(newLeaseMap.containsKey(adjacentParentShardId));
-//        Lease adjacentParentLease = newLeaseMap.get(adjacentParentShardId);
-//        assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.checkpoint());
-//    }
-//
-//    /**
-//     * Test parentShardIds() when the shard has no parents.
-//     */
-//    @Test
+    //        Set<String> shardIdsOfCurrentLeases = new HashSet<String>();
+    //        Map<String, Lease> newLeaseMap = new HashMap<String, Lease>();
+    //        Map<String, Shard> kinesisShards = new HashMap<String, Shard>();
+    //
+    //        String parentShardId = "shardId-parent";
+    //        kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+    //        shardIdsOfCurrentLeases.add(parentShardId);
+    //
+    //        String adjacentParentShardId = "shardId-adjacentParent";
+    //        kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null,
+    //                null));
+    //
+    //        String shardId = "shardId-9-1";
+    //        Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null);
+    //        kinesisShards.put(shardId, shard);
+    //
+    //        Map<String, Boolean> memoizationContext = new HashMap<>();
+    //        assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST,
+    //                shardIdsOfCurrentLeases,
+    //                kinesisShards,
+    //                newLeaseMap,
+    //                memoizationContext));
+    //        assertEquals(1, newLeaseMap.size());
+    //        assertTrue(newLeaseMap.containsKey(adjacentParentShardId));
+    //        Lease adjacentParentLease = newLeaseMap.get(adjacentParentShardId);
+    //        assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.checkpoint());
+    //    }
+    //
+    //    /**
+    //     * Test parentShardIds() when the shard has no parents.
+    //     */
+    //    @Test
 //    public void testGetParentShardIdsNoParents() {
-//        Shard shard = new Shard();
-//        assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty());
-//    }
-//
-//    /**
-//     * Test parentShardIds() when the shard has no parents.
-//     */
-//    @Test
+    //        Shard shard = new Shard();
+    //        assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty());
+    //    }
+    //
+    //    /**
+    //     * Test parentShardIds() when the shard has no parents.
+    //     */
+    //    @Test
 //    public void testGetParentShardIdsTrimmedParents() {
-//        Map<String, Shard> shardMap = new HashMap<String, Shard>();
-//        Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null);
-//        assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty());
-//    }
-//
-//    /**
-//     * Test parentShardIds() when the shard has a single parent.
-//     */
-//    @Test
+    //        Map<String, Shard> shardMap = new HashMap<String, Shard>();
+    //        Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null);
+    //        assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty());
+    //    }
+    //
+    //    /**
+    //     * Test parentShardIds() when the shard has a single parent.
+    //     */
+    //    @Test
 //    public void testGetParentShardIdsSingleParent() {
-//        Map<String, Shard> shardMap = new HashMap<String, Shard>();
-//
-//        String parentShardId = "shardId-parent";
-//        shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
-//
-//        Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null);
-//        Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertEquals(1, parentShardIds.size());
-//        assertTrue(parentShardIds.contains(parentShardId));
-//
-//        shard.setParentShardId(null);
-//        parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertTrue(parentShardIds.isEmpty());
-//
-//        shard.setAdjacentParentShardId(parentShardId);
-//        parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertEquals(1, parentShardIds.size());
-//        assertTrue(parentShardIds.contains(parentShardId));
-//    }
-//
-//    /**
-//     * Test parentShardIds() when the shard has two parents, one is trimmed.
-//     */
-//    @Test
+    //        Map<String, Shard> shardMap = new HashMap<String, Shard>();
+    //
+    //        String parentShardId = "shardId-parent";
+    //        shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+    //
+    //        Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null);
+    //        Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+    //        assertEquals(1, parentShardIds.size());
+    //        assertTrue(parentShardIds.contains(parentShardId));
+    //
+    //        shard.setParentShardId(null);
+    //        parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+    //        assertTrue(parentShardIds.isEmpty());
+    //
+    //        shard.setAdjacentParentShardId(parentShardId);
+    //        parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+    //        assertEquals(1, parentShardIds.size());
+    //        assertTrue(parentShardIds.contains(parentShardId));
+    //    }
+    //
+    //    /**
+    //     * Test parentShardIds() when the shard has two parents, one is trimmed.
+    //     */
+    //    @Test
 //    public void testGetParentShardIdsOneTrimmedParent() {
-//        Map<String, Shard> shardMap = new HashMap<String, Shard>();
-//
-//        String parentShardId = "shardId-parent";
-//        Shard parent = ShardObjectHelper.newShard(parentShardId, null, null, null);
-//
-//        String adjacentParentShardId = "shardId-adjacentParent";
-//        Shard adjacentParent = ShardObjectHelper.newShard(adjacentParentShardId, null, null, null);
-//
-//        Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null);
-//
-//        shardMap.put(parentShardId, parent);
-//        Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertEquals(1, parentShardIds.size());
-//        assertTrue(parentShardIds.contains(parentShardId));
-//
-//        shardMap.remove(parentShardId);
-//        parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertTrue(parentShardIds.isEmpty());
-//
-//        shardMap.put(adjacentParentShardId, adjacentParent);
-//        parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertEquals(1, parentShardIds.size());
-//        assertTrue(parentShardIds.contains(adjacentParentShardId));
-//    }
-//
-//    /**
-//     * Test parentShardIds() when the shard has two parents.
+    //     */
+    //    @Test
 //    public void testGetParentShardIdsTwoParents() {
-//        Map<String, Shard> shardMap = new HashMap<String, Shard>();
-//
-//        String parentShardId = "shardId-parent";
-//        shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
-//
-//        String adjacentParentShardId = "shardId-adjacentParent";
-//        shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
-//
-//        Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null);
-//
-//        Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
-//        assertEquals(2, parentShardIds.size());
-//        assertTrue(parentShardIds.contains(parentShardId));
-//        assertTrue(parentShardIds.contains(adjacentParentShardId));
-//    }
-//
-//    /**
-//     */
-//    @Test
+    //        Map<String, Shard> shardMap = new HashMap<String, Shard>();
+    //
+    //        String parentShardId = "shardId-parent";
+    //        shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null));
+    //
+    //        String adjacentParentShardId = "shardId-adjacentParent";
+    //        shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null));
+    //
+    //        Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null);
+    //
+    //        Set<String> parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap);
+    //        assertEquals(2, parentShardIds.size());
+    //        assertTrue(parentShardIds.contains(parentShardId));
+    //        assertTrue(parentShardIds.contains(adjacentParentShardId));
+    //    }
+    //
+    //    /**
+    //     */
+    //    @Test
 //    public void testNewLease() {
-//        Shard shard = new Shard();
-//        String shardId = "shardId-95";
-//        shard.setShardId(shardId);
-//        String parentShardId = "shardId-parent";
-//        String adjacentParentShardId = "shardId-adjacentParent";
-//        shard.setParentShardId(parentShardId);
-//        shard.setAdjacentParentShardId(adjacentParentShardId);
-//
-//        Lease lease = ShardSyncer.newKCLLease(shard);
-//        assertEquals(shardId, lease.leaseKey());
-//        assertNull(lease.checkpoint());
-//        Set<String> parentIds = lease.parentShardIds();
-//        assertEquals(2, parentIds.size());
-//        assertTrue(parentIds.contains(parentShardId));
-//        assertTrue(parentIds.contains(adjacentParentShardId));
-//    }
-//
-//    /**
-//     * Test method for constructShardIdToShardMap.
-//     *
-//     * .
-//     */
-//    @Test
+    //        Shard shard = new Shard();
+    //        String shardId = "shardId-95";
+    //        shard.setShardId(shardId);
+    //        String parentShardId = "shardId-parent";
+    //        String adjacentParentShardId = "shardId-adjacentParent";
+    //        shard.setParentShardId(parentShardId);
+    //        shard.setAdjacentParentShardId(adjacentParentShardId);
+    //
+    //        Lease lease = ShardSyncer.newKCLLease(shard);
+    //        assertEquals(shardId, lease.leaseKey());
+    //        assertNull(lease.checkpoint());
+    //        Set<String> parentIds = lease.parentShardIds();
+    //        assertEquals(2, parentIds.size());
+    //        assertTrue(parentIds.contains(parentShardId));
+    //        assertTrue(parentIds.contains(adjacentParentShardId));
+    //    }
+    //
+    //    /**
+    //     * Test method for constructShardIdToShardMap.
+    //     *
+    //     * .
+    //     */
+    //    @Test
 //    public void testConstructShardIdToShardMap() {
-//        List<Shard> shards = new ArrayList<Shard>(2);
-//        shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null));
-//        shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null));
-//
-//        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
-//        assertEquals(shards.size(), shardIdToShardMap.size());
-//        for (Shard shard : shards) {
-//            assertSame(shard, shardIdToShardMap.get(shard.getShardId()));
-//        }
-//    }
-//
-//    /**
-//     * Test getOpenShards() - no shards are open.
-//     */
-//    @Test
+    //        List<Shard> shards = new ArrayList<Shard>(2);
+    //        shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null));
+    //        shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null));
+    //
+    //        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+    //        assertEquals(shards.size(), shardIdToShardMap.size());
+    //        for (Shard shard : shards) {
+    //            assertSame(shard, shardIdToShardMap.get(shard.getShardId()));
+    //        }
+    //    }
+    //
+    //    /**
+    //     * Test getOpenShards() - no shards are open.
+    //     */
+    //    @Test
 //    public void testGetOpenShardsNoneOpen() {
-//        List<Shard> shards = new ArrayList<Shard>();
-//        shards.add(ShardObjectHelper.newShard("shardId-9384",
-//                null,
-//                null,
-//                ShardObjectHelper.newSequenceNumberRange("123", "345")));
-//        assertTrue(ShardSyncer.getOpenShards(shards).isEmpty());
-//    }
-//
-//    /**
-//     * Test getOpenShards() - test null and max end sequence number.
-//     */
-//    @Test
+    //        List<Shard> shards = new ArrayList<Shard>();
+    //        shards.add(ShardObjectHelper.newShard("shardId-9384",
+    //                null,
+    //                null,
+    //                ShardObjectHelper.newSequenceNumberRange("123", "345")));
+    //        assertTrue(ShardSyncer.getOpenShards(shards).isEmpty());
+    //    }
+    //
+    //    /**
+    //     * Test getOpenShards() - test null and max end sequence number.
+    //     */
+    //    @Test
 //    public void testGetOpenShardsNullAndMaxEndSeqNum() {
-//        List<Shard> shards = new ArrayList<Shard>();
-//        String shardId = "shardId-2738";
-//        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null);
-//        shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange));
-//
-//        // Verify shard is considered open when it has a null end sequence number
-//        List<Shard> openShards = ShardSyncer.getOpenShards(shards);
-//        assertEquals(1, openShards.size());
-//        assertEquals(shardId, openShards.get(0).getShardId());
-//
-//        // Close shard before testing for max sequence number
-//        sequenceNumberRange.setEndingSequenceNumber("1000");
-//        openShards = ShardSyncer.getOpenShards(shards);
-//        assertTrue(openShards.isEmpty());
-//
-//        // Verify shard is considered closed when the end sequence number is set to max allowed sequence number
-//        sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString());
-//        openShards = ShardSyncer.getOpenShards(shards);
-//        assertEquals(0, openShards.size());
-//    }
-//
-//    /**
-//     * Test isCandidateForCleanup
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test
+    //        List<Shard> shards = new ArrayList<Shard>();
+    //        String shardId = "shardId-2738";
+    //        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null);
+    //        shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange));
+    //
+    //        // Verify shard is considered open when it has a null end sequence number
+    //        List<Shard> openShards = ShardSyncer.getOpenShards(shards);
+    //        assertEquals(1, openShards.size());
+    //        assertEquals(shardId, openShards.get(0).getShardId());
+    //
+    //        // Close shard before testing for max sequence number
+    //        sequenceNumberRange.setEndingSequenceNumber("1000");
+    //        openShards = ShardSyncer.getOpenShards(shards);
+    //        assertTrue(openShards.isEmpty());
+    //
+    //        // Verify shard is considered closed when the end sequence number is set to max allowed sequence number
+    //        sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString());
+    //        openShards = ShardSyncer.getOpenShards(shards);
+    //        assertEquals(0, openShards.size());
+    //    }
+    //
+    //    /**
+    //     * Test isCandidateForCleanup
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test
 //    public void testIsCandidateForCleanup() throws KinesisClientLibIOException {
-//        String parentShardId = "shardId-0000";
-//        String adjacentParentShardId = "shardId-0001";
-//        String shardId = "shardId-0002";
-//        Lease lease = newLease(shardId);
-//        List<String> parentShardIds = new ArrayList<>();
-//        parentShardIds.add(parentShardId);
-//        parentShardIds.add(adjacentParentShardId);
-//        lease.parentShardIds(parentShardIds);
-//        Set<String> currentKinesisShardIds = new HashSet<>();
-//
-//        currentKinesisShardIds.add(shardId);
-//        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//
-//        currentKinesisShardIds.clear();
-//        assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//
-//        currentKinesisShardIds.add(parentShardId);
-//        // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//
-//        currentKinesisShardIds.clear();
-//        assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//
-//        currentKinesisShardIds.add(adjacentParentShardId);
-//        // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//        currentKinesisShardIds.add(parentShardId);
-//        // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//        currentKinesisShardIds.add(shardId);
-//        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//    }
-//
-//    /**
-//     * Test isCandidateForCleanup
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test(expected = KinesisClientLibIOException.class)
+    //        String parentShardId = "shardId-0000";
+    //        String adjacentParentShardId = "shardId-0001";
+    //        String shardId = "shardId-0002";
+    //        Lease lease = newLease(shardId);
+    //        List<String> parentShardIds = new ArrayList<>();
+    //        parentShardIds.add(parentShardId);
+    //        parentShardIds.add(adjacentParentShardId);
+    //        lease.parentShardIds(parentShardIds);
+    //        Set<String> currentKinesisShardIds = new HashSet<>();
+    //
+    //        currentKinesisShardIds.add(shardId);
+    //        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //
+    //        currentKinesisShardIds.clear();
+    //        assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //
+    //        currentKinesisShardIds.add(parentShardId);
+    //        // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //
+    //        currentKinesisShardIds.clear();
+    //        assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //
+    //        currentKinesisShardIds.add(adjacentParentShardId);
+    //        // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //        currentKinesisShardIds.add(parentShardId);
+    //        // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //        currentKinesisShardIds.add(shardId);
+    //        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //    }
+    //
+    //    /**
+    //     * Test isCandidateForCleanup
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test(expected = KinesisClientLibIOException.class)
 //    public void testIsCandidateForCleanupParentExists() throws KinesisClientLibIOException {
-//        String parentShardId = "shardId-0000";
-//        String adjacentParentShardId = "shardId-0001";
-//        String shardId = "shardId-0002";
-//        Lease lease = newLease(shardId);
-//        List<String> parentShardIds = new ArrayList<>();
-//        parentShardIds.add(parentShardId);
-//        parentShardIds.add(adjacentParentShardId);
-//        lease.parentShardIds(parentShardIds);
-//        Set<String> currentKinesisShardIds = new HashSet<>();
-//
-//        currentKinesisShardIds.add(parentShardId);
-//        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//    }
-//
-//    /**
-//     * Test isCandidateForCleanup
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test(expected = KinesisClientLibIOException.class)
+    //        String parentShardId = "shardId-0000";
+    //        String adjacentParentShardId = "shardId-0001";
+    //        String shardId = "shardId-0002";
+    //        Lease lease = newLease(shardId);
+    //        List<String> parentShardIds = new ArrayList<>();
+    //        parentShardIds.add(parentShardId);
+    //        parentShardIds.add(adjacentParentShardId);
+    //        lease.parentShardIds(parentShardIds);
+    //        Set<String> currentKinesisShardIds = new HashSet<>();
+    //
+    //        currentKinesisShardIds.add(parentShardId);
+    //        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //    }
+    //
+    //    /**
+    //     * Test isCandidateForCleanup
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test(expected = KinesisClientLibIOException.class)
 //    public void testIsCandidateForCleanupAdjacentParentExists() throws KinesisClientLibIOException {
-//        String parentShardId = "shardId-0000";
-//        String adjacentParentShardId = "shardId-0001";
-//        String shardId = "shardId-0002";
-//        Lease lease = newLease(shardId);
-//        List<String> parentShardIds = new ArrayList<>();
-//        parentShardIds.add(parentShardId);
-//        parentShardIds.add(adjacentParentShardId);
-//        lease.parentShardIds(parentShardIds);
-//        Set<String> currentKinesisShardIds = new HashSet<>();
-//
-//        currentKinesisShardIds.add(adjacentParentShardId);
-//        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
-//    }
-//
-//    /**
-//     * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun).
-//     *
-//     * @throws DependencyException
-//     * @throws InvalidStateException
-//     * @throws ProvisionedThroughputException
-//     */
-//    @Test
+    //        String parentShardId = "shardId-0000";
+    //        String adjacentParentShardId = "shardId-0001";
+    //        String shardId = "shardId-0002";
+    //        Lease lease = newLease(shardId);
+    //        List<String> parentShardIds = new ArrayList<>();
+    //        parentShardIds.add(parentShardId);
+    //        parentShardIds.add(adjacentParentShardId);
+    //        lease.parentShardIds(parentShardIds);
+    //        Set<String> currentKinesisShardIds = new HashSet<>();
+    //
+    //        currentKinesisShardIds.add(adjacentParentShardId);
+    //        assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds));
+    //    }
+    //
+    //    /**
+    //     * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun).
+    //     *
+    //     * @throws DependencyException
+    //     * @throws InvalidStateException
+    //     * @throws ProvisionedThroughputException
+    //     */
+    //    @Test
 //    public void testCleanupLeaseForClosedShard()
-//            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
-//        String closedShardId = "shardId-2";
-//        Lease leaseForClosedShard = newLease(closedShardId);
-//        leaseForClosedShard.checkpoint(new ExtendedSequenceNumber("1234"));
-//        dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard);
-//
-//        Set<String> childShardIds = new HashSet<>();
-//        List<Lease> trackedLeases = new ArrayList<>();
-//        Set<String> parentShardIds = new HashSet<>();
-//        parentShardIds.add(closedShardId);
-//        String childShardId1 = "shardId-5";
-//        Lease childLease1 = newLease(childShardId1);
-//        childLease1.parentShardIds(parentShardIds);
-//        childLease1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
-//        String childShardId2 = "shardId-7";
-//        Lease childLease2 = newLease(childShardId2);
-//        childLease2.parentShardIds(parentShardIds);
-//        childLease2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
-//        Map<String, Lease> trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
-//
-//        // empty list of leases
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//
-//        // closed shard has not been fully processed yet (checkpoint != SHARD_END)
-//        trackedLeases.add(leaseForClosedShard);
-//        trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//
-//        // closed shard has been fully processed yet (checkpoint == SHARD_END)
-//        leaseForClosedShard.checkpoint(ExtendedSequenceNumber.SHARD_END);
-//        dynamoDBLeaseRefresher.updateLease(leaseForClosedShard);
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//
-//        // lease for only one child exists
-//        childShardIds.add(childShardId1);
-//        childShardIds.add(childShardId2);
-//        dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard);
-//        dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease1);
-//        trackedLeases.add(childLease1);
-//        trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//
-//        // leases for both children exists, but they are both at TRIM_HORIZON
-//        dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease2);
-//        trackedLeases.add(childLease2);
-//        trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//
-//        // leases for both children exists, one is at TRIM_HORIZON
-//        childLease1.checkpoint(new ExtendedSequenceNumber("34890"));
-//        dynamoDBLeaseRefresher.updateLease(childLease1);
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//
-//        // leases for both children exists, NONE of them are at TRIM_HORIZON
-//        childLease2.checkpoint(new ExtendedSequenceNumber("43789"));
-//        dynamoDBLeaseRefresher.updateLease(childLease2);
-//        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher);
-//        assertNull(dynamoDBLeaseRefresher.getLease(closedShardId));
-//    }
-//
-//    /**
-//     * Test we can handle trimmed Kinesis shards (absent from the shard list), and valid closed shards.
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test
+    //            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
+    //        String closedShardId = "shardId-2";
+    //        Lease leaseForClosedShard = newLease(closedShardId);
+    //        leaseForClosedShard.checkpoint(new ExtendedSequenceNumber("1234"));
+    //        dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard);
+    //
+    //        Set<String> childShardIds = new HashSet<>();
+    //        List<Lease> trackedLeases = new ArrayList<>();
+    //        Set<String> parentShardIds = new HashSet<>();
+    //        parentShardIds.add(closedShardId);
+    //        String childShardId1 = "shardId-5";
+    //        Lease childLease1 = newLease(childShardId1);
+    //        childLease1.parentShardIds(parentShardIds);
+    //        childLease1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
+    //        String childShardId2 = "shardId-7";
+    //        Lease childLease2 = newLease(childShardId2);
+    //        childLease2.parentShardIds(parentShardIds);
+    //        childLease2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
+    //        Map<String, Lease> trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+    //
+    //        // empty list of leases
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //
+    //        // closed shard has not been fully processed yet (checkpoint != SHARD_END)
+    //        trackedLeases.add(leaseForClosedShard);
+    //        trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //
+    //        // closed shard has been fully processed (checkpoint == SHARD_END)
+    //        leaseForClosedShard.checkpoint(ExtendedSequenceNumber.SHARD_END);
+    //        dynamoDBLeaseRefresher.updateLease(leaseForClosedShard);
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //
+    //        // lease for only one child exists
+    //        childShardIds.add(childShardId1);
+    //        childShardIds.add(childShardId2);
+    //        dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard);
+    //        dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease1);
+    //        trackedLeases.add(childLease1);
+    //        trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //
+    //        // leases for both children exist, but they are both at TRIM_HORIZON
+    //        dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease2);
+    //        trackedLeases.add(childLease2);
+    //        trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases);
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //
+    //        // leases for both children exist, one is at TRIM_HORIZON
+    //        childLease1.checkpoint(new ExtendedSequenceNumber("34890"));
+    //        dynamoDBLeaseRefresher.updateLease(childLease1);
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //
+    //        // leases for both children exist, NONE of them are at TRIM_HORIZON
+    //        childLease2.checkpoint(new ExtendedSequenceNumber("43789"));
+    //        dynamoDBLeaseRefresher.updateLease(childLease2);
+    //        ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap,
+    //                dynamoDBLeaseRefresher);
+    //        assertNull(dynamoDBLeaseRefresher.getLease(closedShardId));
+    //    }
+    //
+    //    /**
+    //     * Test we can handle trimmed Kinesis shards (absent from the shard list), and valid closed shards.
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test
 //    public void testAssertShardCoveredOrAbsentTestAbsentAndValid() throws KinesisClientLibIOException {
-//        List<Shard> shards = new ArrayList<>();
-//        String expectedClosedShardId = "shardId-34098";
-//        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
-//        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
-//        Shard closedShard =
-//                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
-//        SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300");
-//        Shard child1 =
-//                ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null, childSequenceNumberRange);
-//        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
-//        Map<String, Set<String>> shardIdToChildShardIdsMap =
-//                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
-//        Set<String> closedShardIds = new HashSet<>();
-//        closedShardIds.add(expectedClosedShardId);
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//
-//        // test for case where shard has been trimmed (absent from list)
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//
-//        // Populate shards.
-//        shards.add(closedShard);
-//        shards.add(child1);
-//        shardIdToShardMap.put(expectedClosedShardId, closedShard);
-//        shardIdToShardMap.put(child1.getShardId(), child1);
-//        shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
-//
-//        // test degenerate split/merge
-//        child1.setHashKeyRange(hashKeyRange);
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//
-//        // test merge
-//        child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985"));
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//        child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25"));
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//
-//        // test split
-//        HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15");
-//        HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25");
-//        child1.setHashKeyRange(childHashKeyRange1);
-//        Shard child2 = ShardObjectHelper.newShard("shardId-43789",
-//                null,
-//                expectedClosedShardId,
-//                childSequenceNumberRange,
-//                childHashKeyRange2);
-//        shards.add(child2);
-//        shardIdToShardMap.put(child2.getShardId(), child2);
-//        shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//    }
-//
-//    /**
-//     * Test we throw an exception if the shard is open
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test(expected = KinesisClientLibIOException.class)
+    //        List<Shard> shards = new ArrayList<>();
+    //        String expectedClosedShardId = "shardId-34098";
+    //        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
+    //        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+    //        Shard closedShard =
+    //                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
+    //        SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300");
+    //        Shard child1 =
+    //                ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null,
+    //                        childSequenceNumberRange);
+    //        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+    //        Map<String, Set<String>> shardIdToChildShardIdsMap =
+    //                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+    //        Set<String> closedShardIds = new HashSet<>();
+    //        closedShardIds.add(expectedClosedShardId);
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //
+    //        // test for case where shard has been trimmed (absent from list)
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //
+    //        // Populate shards.
+    //        shards.add(closedShard);
+    //        shards.add(child1);
+    //        shardIdToShardMap.put(expectedClosedShardId, closedShard);
+    //        shardIdToShardMap.put(child1.getShardId(), child1);
+    //        shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+    //
+    //        // test degenerate split/merge
+    //        child1.setHashKeyRange(hashKeyRange);
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //
+    //        // test merge
+    //        child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985"));
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //        child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25"));
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //
+    //        // test split
+    //        HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15");
+    //        HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25");
+    //        child1.setHashKeyRange(childHashKeyRange1);
+    //        Shard child2 = ShardObjectHelper.newShard("shardId-43789",
+    //                null,
+    //                expectedClosedShardId,
+    //                childSequenceNumberRange,
+    //                childHashKeyRange2);
+    //        shards.add(child2);
+    //        shardIdToShardMap.put(child2.getShardId(), child2);
+    //        shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //    }
+    //
+    //    /**
+    //     * Test we throw an exception if the shard is open
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test(expected = KinesisClientLibIOException.class)
 //    public void testAssertShardCoveredOrAbsentTestOpen() throws KinesisClientLibIOException {
-//        List<Shard> shards = new ArrayList<>();
-//        String expectedClosedShardId = "shardId-34098";
-//        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", null);
-//        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
-//        Shard openShard =
-//                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
-//        shards.add(openShard);
-//        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
-//        Map<String, Set<String>> shardIdToChildShardIdsMap =
-//                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
-//        Set<String> closedShardIds = new HashSet<>();
-//        closedShardIds.add(expectedClosedShardId);
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//    }
-//
-//    /**
-//     * Test we throw an exception if there are no children
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test(expected = KinesisClientLibIOException.class)
+    //        List<Shard> shards = new ArrayList<>();
+    //        String expectedClosedShardId = "shardId-34098";
+    //        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", null);
+    //        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+    //        Shard openShard =
+    //                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
+    //        shards.add(openShard);
+    //        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+    //        Map<String, Set<String>> shardIdToChildShardIdsMap =
+    //                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+    //        Set<String> closedShardIds = new HashSet<>();
+    //        closedShardIds.add(expectedClosedShardId);
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //    }
+    //
+    //    /**
+    //     * Test we throw an exception if there are no children
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test(expected = KinesisClientLibIOException.class)
 //    public void testAssertShardCoveredOrAbsentTestNoChildren() throws KinesisClientLibIOException {
-//        List<Shard> shards = new ArrayList<>();
-//        String expectedClosedShardId = "shardId-34098";
-//        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
-//        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
-//        Shard closedShard =
-//                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
-//        shards.add(closedShard);
-//        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
-//        Map<String, Set<String>> shardIdToChildShardIdsMap =
-//                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
-//        Set<String> closedShardIds = new HashSet<>();
-//        closedShardIds.add(expectedClosedShardId);
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//    }
-//
-//    /**
-//     * Test we throw an exception if children don't cover hash key range (min of children > min of parent)
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test(expected = KinesisClientLibIOException.class)
+    //        List<Shard> shards = new ArrayList<>();
+    //        String expectedClosedShardId = "shardId-34098";
+    //        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
+    //        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+    //        Shard closedShard =
+    //                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange);
+    //        shards.add(closedShard);
+    //        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+    //        Map<String, Set<String>> shardIdToChildShardIdsMap =
+    //                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+    //        Set<String> closedShardIds = new HashSet<>();
+    //        closedShardIds.add(expectedClosedShardId);
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //    }
+    //
+    //    /**
+    //     * Test we throw an exception if children don't cover hash key range (min of children > min of parent)
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test(expected = KinesisClientLibIOException.class)
 //    public void testAssertShardCoveredOrAbsentTestIncompleteSplitMin() throws KinesisClientLibIOException {
-//        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
-//        HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15");
-//        HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25");
-//        testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2);
-//    }
-//
-//    /**
-//     * Test we throw an exception if children don't cover hash key range (max of children < max of parent)
-//     *
-//     * @throws KinesisClientLibIOException
-//     */
-//    @Test(expected = KinesisClientLibIOException.class)
+    //        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+    //        HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15");
+    //        HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25");
+    //        testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2);
+    //    }
+    //
+    //    /**
+    //     * Test we throw an exception if children don't cover hash key range (max of children < max of parent)
+    //     *
+    //     * @throws KinesisClientLibIOException
+    //     */
+    //    @Test(expected = KinesisClientLibIOException.class)
 //    public void testAssertShardCoveredOrAbsentTestIncompleteSplitMax() throws KinesisClientLibIOException {
-//        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
-//        HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15");
-//        HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23");
-//        testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2);
-//    }
-//
-//    private void testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange,
-//            HashKeyRange child1HashKeyRange,
-//            HashKeyRange child2HashKeyRange)
-//            throws KinesisClientLibIOException {
-//        List<Shard> shards = new ArrayList<>();
-//        String expectedClosedShardId = "shardId-34098";
-//        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
-//        Shard closedShard =
-//                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, parentHashKeyRange);
-//        shards.add(closedShard);
-//
-//        SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300");
-//        Shard child1 = ShardObjectHelper.newShard("shardId-43789",
-//                null,
-//                expectedClosedShardId,
-//                childSequenceNumberRange,
-//                child1HashKeyRange);
-//        shards.add(child1);
-//        Shard child2 = ShardObjectHelper.newShard("shardId-43789",
-//                null,
-//                expectedClosedShardId,
-//                childSequenceNumberRange,
-//                child2HashKeyRange);
-//        shards.add(child2);
-//
-//        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
-//        Map<String, Set<String>> shardIdToChildShardIdsMap =
-//                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
-//        Set<String> closedShardIds = new HashSet<>();
-//        closedShardIds.add(expectedClosedShardId);
-//        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds);
-//    }
-//
+    //        HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25");
+    //        HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15");
+    //        HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23");
+    //        testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2);
+    //    }
+    //
+    //    private void testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange,
+    //            HashKeyRange child1HashKeyRange,
+    //            HashKeyRange child2HashKeyRange)
+    //            throws KinesisClientLibIOException {
+    //        List<Shard> shards = new ArrayList<>();
+    //        String expectedClosedShardId = "shardId-34098";
+    //        SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205");
+    //        Shard closedShard =
+    //                ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange,
+    //                        parentHashKeyRange);
+    //        shards.add(closedShard);
+    //
+    //        SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300");
+    //        Shard child1 = ShardObjectHelper.newShard("shardId-43789",
+    //                null,
+    //                expectedClosedShardId,
+    //                childSequenceNumberRange,
+    //                child1HashKeyRange);
+    //        shards.add(child1);
+    //        Shard child2 = ShardObjectHelper.newShard("shardId-43789",
+    //                null,
+    //                expectedClosedShardId,
+    //                childSequenceNumberRange,
+    //                child2HashKeyRange);
+    //        shards.add(child2);
+    //
+    //        Map<String, Shard> shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards);
+    //        Map<String, Set<String>> shardIdToChildShardIdsMap =
+    //                ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
+    //        Set<String> closedShardIds = new HashSet<>();
+    //        closedShardIds.add(expectedClosedShardId);
+    //        ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap,
+    //                closedShardIds);
+    //    }
+    //
     /**
      * Helper method.
-     *
-     * @param shardId
-     * @return
      */
     private static Lease newLease(final String shardId) {
         final Lease lease = new Lease();
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java
index d0870d51..9c855c4b 100644
--- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java
+++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java
@@ -15,6 +15,15 @@
 package software.amazon.kinesis.leases;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -31,15 +40,6 @@
 import software.amazon.awssdk.services.kinesis.model.ResourceInUseException;
 import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
 import software.amazon.awssdk.services.kinesis.model.Shard;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.isA;
 import static org.hamcrest.CoreMatchers.nullValue;
@@ -72,21 +72,29 @@ public class KinesisShardDetectorTest {
 
     @Mock
     private KinesisAsyncClient client;
+
     @Mock
     private CompletableFuture<ListShardsResponse> mockFuture;
 
     @Before
     public void setup() {
-        shardDetector = new KinesisShardDetector(client, STREAM_NAME, LIST_SHARDS_BACKOFF_TIME_IN_MILLIS,
-                MAX_LIST_SHARDS_RETRY_ATTEMPTS, LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS,
-                MAX_CACHE_MISSES_BEFORE_RELOAD, CACHE_MISS_WARNING_MODULUS);
+        shardDetector = new KinesisShardDetector(
+                client,
+                STREAM_NAME,
+                LIST_SHARDS_BACKOFF_TIME_IN_MILLIS,
+                MAX_LIST_SHARDS_RETRY_ATTEMPTS,
+                LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS,
+                MAX_CACHE_MISSES_BEFORE_RELOAD,
+                CACHE_MISS_WARNING_MODULUS);
     }
 
     @Test
     public void testListShardsSingleResponse() {
         final List<Shard> expectedShards = new ArrayList<>();
 
-        final ListShardsResponse listShardsResponse = ListShardsResponse.builder().nextToken(null)
-                .shards(expectedShards).build();
+        final ListShardsResponse listShardsResponse = ListShardsResponse.builder()
+                .nextToken(null)
+                .shards(expectedShards)
+                .build();
         final CompletableFuture<ListShardsResponse> future = CompletableFuture.completedFuture(listShardsResponse);
 
         when(client.listShards(any(ListShardsRequest.class))).thenReturn(future);
@@ -106,8 +114,7 @@ public class KinesisShardDetectorTest {
         try {
             shardDetector.listShards();
         } finally {
-            verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS))
-                    .listShards(any(ListShardsRequest.class));
+            verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)).listShards(any(ListShardsRequest.class));
         }
     }
@@ -123,7 +130,6 @@ public class KinesisShardDetectorTest {
 
         assertThat(shards, nullValue());
 
         verify(client).listShards(any(ListShardsRequest.class));
-
     }
 
     @Test(expected = LimitExceededException.class)
@@ -137,8 +143,7 @@ public class KinesisShardDetectorTest {
         try {
             shardDetector.listShards();
         } finally {
-            verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS))
-                    .listShards(any(ListShardsRequest.class));
+            verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)).listShards(any(ListShardsRequest.class));
         }
     }
@@ -165,7 +170,6 @@ public class KinesisShardDetectorTest {
         when(client.listShards(any(ListShardsRequest.class))).thenReturn(mockFuture);
 
         shardDetector.listShards();
-
     }
 
     @Test
@@ -183,8 +187,8 @@ public class KinesisShardDetectorTest {
     @Test
     public void testGetShardEmptyCache() {
         final String shardId = String.format(SHARD_ID, 1);
-        final CompletableFuture<ListShardsResponse> future = CompletableFuture
-                .completedFuture(ListShardsResponse.builder().shards(createShardList()).build());
+        final CompletableFuture<ListShardsResponse> future = CompletableFuture.completedFuture(
+                ListShardsResponse.builder().shards(createShardList()).build());
 
         when(client.listShards(any(ListShardsRequest.class))).thenReturn(future);
@@ -213,36 +217,40 @@ public class KinesisShardDetectorTest {
         final List<Shard> shards = new ArrayList<>(createShardList());
         shards.add(Shard.builder().shardId(shardId).build());
-        final CompletableFuture<ListShardsResponse> future = CompletableFuture
-                .completedFuture(ListShardsResponse.builder().shards(shards).build());
+        final CompletableFuture<ListShardsResponse> future = CompletableFuture.completedFuture(
+                ListShardsResponse.builder().shards(shards).build());
 
         shardDetector.cachedShardMap(createShardList());
         when(client.listShards(any(ListShardsRequest.class))).thenReturn(future);
 
         final List<Shard> responses = IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD + 1)
-                .mapToObj(x -> shardDetector.shard(shardId)).collect(Collectors.toList());
+                .mapToObj(x -> shardDetector.shard(shardId))
+                .collect(Collectors.toList());
 
         IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD).forEach(x -> {
             assertThat(responses.get(x), nullValue());
         });
-        assertThat(responses.get(MAX_CACHE_MISSES_BEFORE_RELOAD), equalTo(Shard.builder().shardId(shardId).build()));
+        assertThat(
+                responses.get(MAX_CACHE_MISSES_BEFORE_RELOAD),
+                equalTo(Shard.builder().shardId(shardId).build()));
 
         verify(client).listShards(any(ListShardsRequest.class));
     }
 
     @Test
     public void testGetShardNonExistentShardForceRefresh() {
         final String shardId = String.format(SHARD_ID, 5);
-        final CompletableFuture<ListShardsResponse> future = CompletableFuture
-                .completedFuture(ListShardsResponse.builder().shards(createShardList()).build());
+        final CompletableFuture<ListShardsResponse> future = CompletableFuture.completedFuture(
                ListShardsResponse.builder().shards(createShardList()).build());
 
         shardDetector.cachedShardMap(createShardList());
         when(client.listShards(any(ListShardsRequest.class))).thenReturn(future);
 
         final List<Shard> responses = IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD + 1)
-                .mapToObj(x -> shardDetector.shard(shardId)).collect(Collectors.toList());
+                .mapToObj(x -> shardDetector.shard(shardId))
+                .collect(Collectors.toList());
 
         responses.forEach(response -> assertThat(response, nullValue()));
         assertThat(shardDetector.cacheMisses().get(), equalTo(0));
@@ -250,7 +258,8 @@ public class KinesisShardDetectorTest {
     }
 
     private List<Shard> createShardList() {
-        return Arrays.asList(Shard.builder().shardId(String.format(SHARD_ID, 0)).build(),
+        return Arrays.asList(
+                Shard.builder().shardId(String.format(SHARD_ID, 0)).build(),
                 Shard.builder().shardId(String.format(SHARD_ID, 1)).build(),
                 Shard.builder().shardId(String.format(SHARD_ID, 2)).build(),
                 Shard.builder().shardId(String.format(SHARD_ID, 3)).build(),
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java
index 8f825875..1bfe1bac 100644
--- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java
+++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java
@@ -34,14 +34,24 @@
     private ExtendedSequenceNumber checkpoint;
     private ExtendedSequenceNumber pendingCheckpoint;
     private Long ownerSwitchesSinceCheckpoint = 0L;
-    private Set<String> parentShardIds = new HashSet<>();
+    private Set<String> parentShardIds = new HashSet<>();
     private Set<String> childShardIds = new HashSet<>();
     private byte[] pendingCheckpointState;
     private HashKeyRangeForLease hashKeyRangeForLease;
 
     public Lease build() {
-        return new Lease(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos, checkpoint,
-                pendingCheckpoint, ownerSwitchesSinceCheckpoint, parentShardIds, childShardIds,
-                pendingCheckpointState, hashKeyRangeForLease);
+        return new Lease(
+                leaseKey,
+                leaseOwner,
+                leaseCounter,
+                concurrencyToken,
+                lastCounterIncrementNanos,
+                checkpoint,
+                pendingCheckpoint,
+                ownerSwitchesSinceCheckpoint,
+                parentShardIds,
+                childShardIds,
+                pendingCheckpointState,
+                hashKeyRangeForLease);
     }
-}
\ No newline at end of file
+}
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java
index 2e691844..9d51351c 100644
--- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java
+++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java
@@ -15,6 +15,14 @@
 package software.amazon.kinesis.leases;
 
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.stream.Collectors;
+
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -29,14 +37,6 @@
 import software.amazon.kinesis.metrics.MetricsFactory;
 import software.amazon.kinesis.metrics.NullMetricsFactory;
 import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
-
-import java.time.Duration;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.stream.Collectors;
-
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -45,43 +45,45 @@
 
 @RunWith(MockitoJUnitRunner.class)
 public class LeaseCleanupManagerTest {
-    private ShardInfo shardInfo;
-    private StreamIdentifier streamIdentifier;
-    private String concurrencyToken = "1234";
+    private static final ShardInfo SHARD_INFO =
+            new ShardInfo("shardId", "concurrencyToken", Collections.emptySet(), ExtendedSequenceNumber.LATEST);
 
-    private String shardId = "shardId";
-    private String splitParent = "splitParent";
-    private String mergeParent1 = "mergeParent-1";
-    private String mergeParent2 = "mergeParent-2";
+    private static final StreamIdentifier STREAM_IDENTIFIER = StreamIdentifier.singleStreamInstance("streamName");
 
-    private Duration maxFutureWait = Duration.ofSeconds(1);
-    private long leaseCleanupIntervalMillis = Duration.ofSeconds(1).toMillis();
-    private long completedLeaseCleanupIntervalMillis = Duration.ofSeconds(0).toMillis();
-    private long garbageLeaseCleanupIntervalMillis = Duration.ofSeconds(0).toMillis();
+    private final long leaseCleanupIntervalMillis = Duration.ofSeconds(1).toMillis();
+    private final long completedLeaseCleanupIntervalMillis =
+            Duration.ofSeconds(0).toMillis();
+    private final long garbageLeaseCleanupIntervalMillis = Duration.ofSeconds(0).toMillis();
     private boolean cleanupLeasesOfCompletedShards = true;
     private LeaseCleanupManager leaseCleanupManager;
     private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory();
 
     @Mock
     private LeaseRefresher leaseRefresher;
+
     @Mock
     private LeaseCoordinator leaseCoordinator;
+
     @Mock
     private ShardDetector shardDetector;
+
     @Mock
     private ScheduledExecutorService deletionThreadPool;
 
     @Before
     public void setUp() throws Exception {
-        shardInfo = new ShardInfo(shardId, concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-        streamIdentifier = StreamIdentifier.singleStreamInstance("streamName");
-        leaseCleanupManager = new LeaseCleanupManager(leaseCoordinator, NULL_METRICS_FACTORY, deletionThreadPool,
-                cleanupLeasesOfCompletedShards, leaseCleanupIntervalMillis, completedLeaseCleanupIntervalMillis,
+        leaseCleanupManager = new LeaseCleanupManager(
+                leaseCoordinator,
+                NULL_METRICS_FACTORY,
+                deletionThreadPool,
+                cleanupLeasesOfCompletedShards,
+                leaseCleanupIntervalMillis,
+                completedLeaseCleanupIntervalMillis,
                 garbageLeaseCleanupIntervalMillis);
 
         when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher);
-        when(leaseCoordinator.updateLease(any(Lease.class), any(UUID.class), any(String.class), any(String.class))).thenReturn(true);
+        when(leaseCoordinator.updateLease(any(Lease.class), any(UUID.class), any(String.class), any(String.class)))
+                .thenReturn(true);
     }
 
     /**
@@ -112,10 +114,8 @@ public class LeaseCleanupManagerTest {
      */
     @Test
     public final void testParentShardLeaseDeletedSplitCase() throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShardsForSplit(), ExtendedSequenceNumber.LATEST, 1);
+        verifyExpectedDeletedLeasesCompletedShardCase(
+                SHARD_INFO, childShardsForSplit(), ExtendedSequenceNumber.LATEST, 1);
     }
 
     /**
@@ -124,10 +124,8 @@ public class LeaseCleanupManagerTest {
      */
     @Test
     public final void testParentShardLeaseDeletedMergeCase() throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 1);
+        verifyExpectedDeletedLeasesCompletedShardCase(
+                SHARD_INFO, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 1);
    }
 
     /**
@@ -136,15 +134,19 @@ public class LeaseCleanupManagerTest {
      */
     @Test
     public final void testNoLeasesDeletedWhenNotEnabled() throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
         cleanupLeasesOfCompletedShards = false;
-        leaseCleanupManager = new LeaseCleanupManager(leaseCoordinator, NULL_METRICS_FACTORY, deletionThreadPool,
-                cleanupLeasesOfCompletedShards, leaseCleanupIntervalMillis, completedLeaseCleanupIntervalMillis,
+        leaseCleanupManager = new LeaseCleanupManager(
+                leaseCoordinator,
+                NULL_METRICS_FACTORY,
+                deletionThreadPool,
+                cleanupLeasesOfCompletedShards,
+                leaseCleanupIntervalMillis,
+                completedLeaseCleanupIntervalMillis,
                 garbageLeaseCleanupIntervalMillis);
 
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShardsForSplit(), ExtendedSequenceNumber.LATEST, 0);
+        verifyExpectedDeletedLeasesCompletedShardCase(
+                SHARD_INFO, childShardsForSplit(), ExtendedSequenceNumber.LATEST, 0);
     }
 
     /**
@@ -155,10 +157,7 @@ public class LeaseCleanupManagerTest {
     public final void testNoCleanupWhenSomeChildShardLeasesAreNotPresent() throws Exception {
         List<ChildShard> childShards = childShardsForSplit();
 
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShards, ExtendedSequenceNumber.LATEST, false, 0);
+        verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShards, ExtendedSequenceNumber.LATEST, false, 0);
     }
 
     /**
@@ -179,12 +178,9 @@ public class LeaseCleanupManagerTest {
         testParentShardLeaseNotDeletedWhenChildIsAtPosition(ExtendedSequenceNumber.AT_TIMESTAMP);
     }
 
-    private final void testParentShardLeaseNotDeletedWhenChildIsAtPosition(ExtendedSequenceNumber extendedSequenceNumber)
+    private void testParentShardLeaseNotDeletedWhenChildIsAtPosition(ExtendedSequenceNumber extendedSequenceNumber)
             throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShardsForMerge(), extendedSequenceNumber, 0);
+        verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShardsForMerge(), extendedSequenceNumber, 0);
     }
 
     /**
@@ -192,10 +188,19 @@ public class LeaseCleanupManagerTest {
      */
     @Test
     public final void testLeaseNotDeletedWhenParentsStillPresent() throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.singleton("parent"),
-                ExtendedSequenceNumber.LATEST);
+        final ShardInfo shardInfo = new ShardInfo(
+                "shardId-0", "concurrencyToken", Collections.singleton("parent"), ExtendedSequenceNumber.LATEST);
 
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 0);
+        verifyExpectedDeletedLeasesCompletedShardCase(
+                shardInfo, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 0);
+    }
+
+    /**
+     * Verify {@link NullPointerException} is not thrown when a null lease is enqueued.
+     */
+    @Test
+    public void testEnqueueNullLease() {
+        leaseCleanupManager.enqueueForDeletion(createLeasePendingDeletion(null, SHARD_INFO));
     }
 
     /**
@@ -203,65 +208,86 @@ public class LeaseCleanupManagerTest {
     */
     @Test
     public final void testLeaseDeletedWhenShardDoesNotExist() throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-        final Lease heldLease = LeaseHelper.createLease(shardInfo.shardId(), "leaseOwner", Collections.singleton("parentShardId"));
+        final Lease heldLease =
+                LeaseHelper.createLease(SHARD_INFO.shardId(), "leaseOwner", Collections.singleton("parentShardId"));
 
         testLeaseDeletedWhenShardDoesNotExist(heldLease);
     }
 
     /**
      * Tests ResourceNotFound case when completed lease cleanup is disabled.
-     * @throws Exception
      */
     @Test
     public final void testLeaseDeletedWhenShardDoesNotExistAndCleanupCompletedLeaseDisabled() throws Exception {
-        shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(),
-                ExtendedSequenceNumber.LATEST);
-        final Lease heldLease = LeaseHelper.createLease(shardInfo.shardId(), "leaseOwner", Collections.singleton("parentShardId"));
+        final Lease heldLease =
+                LeaseHelper.createLease(SHARD_INFO.shardId(), "leaseOwner", Collections.singleton("parentShardId"));
 
         cleanupLeasesOfCompletedShards = false;
-        leaseCleanupManager = new LeaseCleanupManager(leaseCoordinator, NULL_METRICS_FACTORY, deletionThreadPool,
-                cleanupLeasesOfCompletedShards, leaseCleanupIntervalMillis, completedLeaseCleanupIntervalMillis,
+        leaseCleanupManager = new LeaseCleanupManager(
+                leaseCoordinator,
+                NULL_METRICS_FACTORY,
+                deletionThreadPool,
+                cleanupLeasesOfCompletedShards,
+                leaseCleanupIntervalMillis,
+                completedLeaseCleanupIntervalMillis,
                 garbageLeaseCleanupIntervalMillis);
 
         testLeaseDeletedWhenShardDoesNotExist(heldLease);
     }
 
-    public void testLeaseDeletedWhenShardDoesNotExist(Lease heldLease) throws Exception {
+    private void testLeaseDeletedWhenShardDoesNotExist(Lease heldLease) throws Exception {
         when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher);
-        when(leaseCoordinator.getCurrentlyHeldLease(shardInfo.shardId())).thenReturn(heldLease);
+        when(leaseCoordinator.getCurrentlyHeldLease(SHARD_INFO.shardId())).thenReturn(heldLease);
         when(shardDetector.getChildShards(any(String.class))).thenThrow(ResourceNotFoundException.class);
         when(leaseRefresher.getLease(heldLease.leaseKey())).thenReturn(heldLease);
 
-        leaseCleanupManager.enqueueForDeletion(new LeasePendingDeletion(streamIdentifier, heldLease, shardInfo, shardDetector));
+        leaseCleanupManager.enqueueForDeletion(createLeasePendingDeletion(heldLease, SHARD_INFO));
         leaseCleanupManager.cleanupLeases();
 
-        verify(shardDetector, times(1)).getChildShards(shardInfo.shardId());
-        verify(leaseRefresher, times(1)).deleteLease(heldLease);
+        verify(shardDetector).getChildShards(SHARD_INFO.shardId());
+        verify(leaseRefresher).deleteLease(heldLease);
     }
 
-    private final void verifyExpectedDeletedLeasesCompletedShardCase(ShardInfo shardInfo, List<ChildShard> childShards,
-                                                                     ExtendedSequenceNumber extendedSequenceNumber,
-                                                                     int expectedDeletedLeases) throws Exception {
-        verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShards, extendedSequenceNumber, true, expectedDeletedLeases);
+    private void verifyExpectedDeletedLeasesCompletedShardCase(
+            ShardInfo shardInfo,
+            List<ChildShard> childShards,
+            ExtendedSequenceNumber extendedSequenceNumber,
+            int expectedDeletedLeases)
+            throws Exception {
+        verifyExpectedDeletedLeasesCompletedShardCase(
+                shardInfo, childShards, extendedSequenceNumber, true, expectedDeletedLeases);
     }
 
-    private final void verifyExpectedDeletedLeasesCompletedShardCase(ShardInfo shardInfo, List<ChildShard> childShards,
-                                                                     ExtendedSequenceNumber extendedSequenceNumber,
-                                                                     boolean childShardLeasesPresent,
-                                                                     int expectedDeletedLeases) throws Exception {
+    private void verifyExpectedDeletedLeasesCompletedShardCase(
+            ShardInfo shardInfo,
+            List<ChildShard> childShards,
+            ExtendedSequenceNumber extendedSequenceNumber,
+            boolean childShardLeasesPresent,
+            int expectedDeletedLeases)
+            throws Exception {
+        final Lease lease = LeaseHelper.createLease(
+                shardInfo.shardId(),
+                "leaseOwner",
+                shardInfo.parentShardIds(),
+                childShards.stream().map(ChildShard::shardId).collect(Collectors.toSet()));
+        final List<Lease> childShardLeases = childShards.stream()
+                .map(c -> LeaseHelper.createLease(
+                        ShardInfo.getLeaseKey(shardInfo, c.shardId()),
+                        "leaseOwner",
+                        Collections.singleton(shardInfo.shardId()),
+                        Collections.emptyList(),
+                        extendedSequenceNumber))
+                .collect(Collectors.toList());
-        final Lease lease = LeaseHelper.createLease(shardInfo.shardId(), "leaseOwner", shardInfo.parentShardIds(),
-                childShards.stream().map(c -> c.shardId()).collect(Collectors.toSet()));
-        final List<Lease> childShardLeases = childShards.stream().map(c -> LeaseHelper.createLease(
-                ShardInfo.getLeaseKey(shardInfo, c.shardId()), "leaseOwner", Collections.singleton(shardInfo.shardId()),
-                Collections.emptyList(), extendedSequenceNumber)).collect(Collectors.toList());
-        final List<Lease> parentShardLeases = lease.parentShardIds().stream().map(p ->
-                LeaseHelper.createLease(ShardInfo.getLeaseKey(shardInfo, p), "leaseOwner", Collections.emptyList(),
-                        Collections.singleton(shardInfo.shardId()), extendedSequenceNumber)).collect(Collectors.toList());
+        final List<Lease> parentShardLeases = lease.parentShardIds().stream()
+                .map(p -> LeaseHelper.createLease(
+                        ShardInfo.getLeaseKey(shardInfo, p),
+                        "leaseOwner",
+                        Collections.emptyList(),
+                        Collections.singleton(shardInfo.shardId()),
+                        extendedSequenceNumber))
+                .collect(Collectors.toList());
 
         when(leaseRefresher.getLease(lease.leaseKey())).thenReturn(lease);
         for (Lease parentShardLease : parentShardLeases) {
@@ -273,15 +299,15 @@ public class LeaseCleanupManagerTest {
             }
         }
 
-        leaseCleanupManager.enqueueForDeletion(new LeasePendingDeletion(streamIdentifier, lease, shardInfo, shardDetector));
+        leaseCleanupManager.enqueueForDeletion(createLeasePendingDeletion(lease, shardInfo));
         leaseCleanupManager.cleanupLeases();
 
-        verify(shardDetector, times(1)).getChildShards(shardInfo.shardId());
+        verify(shardDetector).getChildShards(shardInfo.shardId());
         verify(leaseRefresher, times(expectedDeletedLeases)).deleteLease(any(Lease.class));
     }
 
     private List<ChildShard> childShardsForSplit() {
-        List<String> parentShards = Arrays.asList(splitParent);
+        final List<String> parentShards = Collections.singletonList("splitParent");
 
         ChildShard leftChild = ChildShard.builder()
                 .shardId("leftChild")
@@ -294,11 +320,11 @@ public class LeaseCleanupManagerTest {
                 .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99"))
                 .build();
 
-        return Arrays.asList(leftChild, rightChild);
+        return Arrays.asList(leftChild, rightChild);
     }
 
     private List<ChildShard> childShardsForMerge() {
-        List<String> parentShards = Arrays.asList(mergeParent1, mergeParent2);
+        final List<String> parentShards = Arrays.asList("mergeParent1", "mergeParent2");
 
         ChildShard child = ChildShard.builder()
                 .shardId("onlyChild")
@@ -308,4 +334,8 @@ public class LeaseCleanupManagerTest {
 
         return Collections.singletonList(child);
     }
+
+    private LeasePendingDeletion createLeasePendingDeletion(final Lease lease, final ShardInfo shardInfo) {
+        return new LeasePendingDeletion(STREAM_IDENTIFIER, lease, shardInfo, shardDetector);
+    }
 }
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java
index 186fe290..0bc285a6 100644
--- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java
+++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java
@@ -14,18 +14,21 @@
  */
 package software.amazon.kinesis.leases;
 
-import java.awt.*;
+import java.awt.Button;
+import java.awt.Dimension;
+import java.awt.GridLayout;
 import java.awt.event.ActionEvent;
import java.awt.event.ActionListener; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; - -import javax.swing.*; +import javax.swing.BoxLayout; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JPanel; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; @@ -54,19 +57,23 @@ public class LeaseCoordinatorExerciser { private static final long INITIAL_LEASE_TABLE_READ_CAPACITY = 10L; private static final long INITIAL_LEASE_TABLE_WRITE_CAPACITY = 50L; - public static void main(String[] args) throws InterruptedException, DependencyException, InvalidStateException, - ProvisionedThroughputException, IOException { - + public static void main(String[] args) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { int numCoordinators = 9; int numLeases = 73; int leaseDurationMillis = 10000; int epsilonMillis = 100; DynamoDbAsyncClient dynamoDBClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); - LeaseRefresher leaseRefresher = new DynamoDBLeaseRefresher("nagl_ShardProgress", dynamoDBClient, - new DynamoDBLeaseSerializer(), true, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + LeaseRefresher leaseRefresher = new DynamoDBLeaseRefresher( + "nagl_ShardProgress", + dynamoDBClient, + new DynamoDBLeaseSerializer(), + true, + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); if (leaseRefresher.createLeaseTableIfNotExists()) { log.info("Waiting for newly created lease table"); @@ -77,17 +84,31 @@ public class LeaseCoordinatorExerciser { } CloudWatchAsyncClient client = CloudWatchAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - CloudWatchMetricsFactory metricsFactory = new CloudWatchMetricsFactory(client, "testNamespace", 30 * 1000, 1000, - METRICS_LEVEL, MetricsConfig.METRICS_DIMENSIONS_ALL, FLUSH_SIZE); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); + CloudWatchMetricsFactory metricsFactory = new CloudWatchMetricsFactory( + client, + "testNamespace", + 30 * 1000, + 1000, + METRICS_LEVEL, + MetricsConfig.METRICS_DIMENSIONS_ALL, + FLUSH_SIZE); final List coordinators = new ArrayList<>(); for (int i = 0; i < numCoordinators; i++) { String workerIdentifier = "worker-" + Integer.toString(i); - LeaseCoordinator coord = new DynamoDBLeaseCoordinator(leaseRefresher, workerIdentifier, leaseDurationMillis, - epsilonMillis, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, - MAX_LEASE_RENEWER_THREAD_COUNT, INITIAL_LEASE_TABLE_READ_CAPACITY, - INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + LeaseCoordinator coord = new DynamoDBLeaseCoordinator( + leaseRefresher, + workerIdentifier, + leaseDurationMillis, + epsilonMillis, + MAX_LEASES_FOR_WORKER, + MAX_LEASES_TO_STEAL_AT_ONE_TIME, + MAX_LEASE_RENEWER_THREAD_COUNT, + INITIAL_LEASE_TABLE_READ_CAPACITY, + INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); coordinators.add(coord); } @@ -130,7 +151,6 @@ public class LeaseCoordinatorExerciser { button.setLabel("Stop " + coord.workerIdentifier()); } } - }); coordPanel.add(button); @@ -165,12 +185,14 @@ public class LeaseCoordinatorExerciser { public int compare(final Lease arg0, final Lease arg1) { return arg0.leaseKey().compareTo(arg1.leaseKey()); } - }); StringBuilder 
builder = new StringBuilder(); builder.append(""); - builder.append(workerIdentifier).append(":").append(asgn.size()).append(" "); + builder.append(workerIdentifier) + .append(":") + .append(asgn.size()) + .append(" "); for (Lease lease : asgn) { String leaseKey = lease.leaseKey(); @@ -186,8 +208,10 @@ public class LeaseCoordinatorExerciser { greenNesses.put(leaseKey, greenNess); lastOwners.put(leaseKey, lease.leaseOwner()); - builder.append(String.format("%03d", - String.format("#00%02x00", greenNess), Integer.parseInt(leaseKey))).append(" "); + builder.append(String.format( + "%03d", + String.format("#00%02x00", greenNess), Integer.parseInt(leaseKey))) + .append(" "); } builder.append(""); @@ -208,7 +232,6 @@ public class LeaseCoordinatorExerciser { } } } - }.start(); frame.pack(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java index 0e10bc48..f17fc370 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java @@ -15,23 +15,28 @@ package software.amazon.kinesis.leases; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - import java.util.Collection; import java.util.Collections; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + public class LeaseHelper { public static Lease createLease(String leaseKey, String leaseOwner, Collection parentShardIds) { return createLease(leaseKey, leaseOwner, parentShardIds, Collections.emptySet(), ExtendedSequenceNumber.LATEST); } - public static Lease createLease(String leaseKey, String leaseOwner, Collection parentShardIds, Collection childShardIds) { + public static Lease createLease( + String leaseKey, String leaseOwner, Collection parentShardIds, Collection childShardIds) { return createLease(leaseKey, leaseOwner, parentShardIds, childShardIds, ExtendedSequenceNumber.LATEST); } - public static Lease createLease(String leaseKey, String leaseOwner, Collection parentShardIds, - Collection childShardIds, ExtendedSequenceNumber extendedSequenceNumber) { + public static Lease createLease( + String leaseKey, + String leaseOwner, + Collection parentShardIds, + Collection childShardIds, + ExtendedSequenceNumber extendedSequenceNumber) { Lease lease = new Lease(); lease.leaseKey(leaseKey); lease.leaseOwner(leaseOwner); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java index 9f7735f9..f35b4ed8 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java @@ -15,22 +15,21 @@ package software.amazon.kinesis.leases; import lombok.extern.slf4j.Slf4j; -import org.junit.Rule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.mockito.Mock; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.BillingMode; import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import 
software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseSerializer; -import software.amazon.kinesis.leases.dynamodb.TableCreatorCallback; @Slf4j public class LeaseIntegrationBillingModePayPerRequestTest extends LeaseIntegrationTest { @Override protected DynamoDBLeaseRefresher getLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName+"Per-Request", ddbClient, leaseSerializer, true, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PAY_PER_REQUEST); + return new DynamoDBLeaseRefresher( + tableName + "Per-Request", + ddbClient, + leaseSerializer, + true, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PAY_PER_REQUEST, + false); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java index fd5106e4..6f312271 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java @@ -14,12 +14,11 @@ */ package software.amazon.kinesis.leases; +import lombok.extern.slf4j.Slf4j; import org.junit.Rule; import org.junit.rules.TestWatcher; import org.junit.runner.Description; import org.mockito.Mock; - -import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.BillingMode; @@ -33,7 +32,8 @@ public class LeaseIntegrationTest { protected static DynamoDBLeaseRefresher leaseRefresher; protected static DynamoDbAsyncClient ddbClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); protected String tableName = "nagl_ShardProgress"; @@ -73,9 +73,14 @@ public class LeaseIntegrationTest { }; protected DynamoDBLeaseRefresher getLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName, ddbClient, leaseSerializer, true, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PAY_PER_REQUEST); + return new DynamoDBLeaseRefresher( + tableName, + ddbClient, + leaseSerializer, + true, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PAY_PER_REQUEST, + false); } - } - diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java new file mode 100644 index 00000000..e0e338ba --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java @@ -0,0 +1,66 @@ +package software.amazon.kinesis.leases; + +import java.util.Collections; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + +@RunWith(MockitoJUnitRunner.class) +public class LeaseTest { + + private static final long MOCK_CURRENT_TIME = 10000000000L; + private static final long LEASE_DURATION_MILLIS = 1000L; + + private static final long LEASE_DURATION_NANOS = TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS); + + // Write a unit test for software.amazon.kinesis.leases.Lease to 
combinations of leaseOwner (null vs. set) and lease expiry (expired vs. not expired). + @Test + public void testLeaseOwnerNullAndExpired() { + long expiredTime = MOCK_CURRENT_TIME - LEASE_DURATION_NANOS - 1; + Lease lease = createLease(null, "leaseKey", expiredTime); + Assert.assertTrue(lease.isAvailable(LEASE_DURATION_NANOS, MOCK_CURRENT_TIME)); + Assert.assertNull(lease.leaseOwner()); + } + + @Test + public void testLeaseOwnerNotNullAndExpired() { + long expiredTime = MOCK_CURRENT_TIME - LEASE_DURATION_NANOS - 1; + Lease lease = createLease("leaseOwner", "leaseKey", expiredTime); + Assert.assertTrue(lease.isAvailable(LEASE_DURATION_NANOS, MOCK_CURRENT_TIME)); + Assert.assertEquals("leaseOwner", lease.leaseOwner()); + } + + @Test + public void testLeaseOwnerNotNullAndNotExpired() { + long notExpiredTime = MOCK_CURRENT_TIME - LEASE_DURATION_NANOS + 1; + Lease lease = createLease("leaseOwner", "leaseKey", notExpiredTime); + Assert.assertFalse(lease.isAvailable(LEASE_DURATION_NANOS, MOCK_CURRENT_TIME)); + Assert.assertEquals("leaseOwner", lease.leaseOwner()); + } + + @Test + public void testLeaseOwnerNullAndNotExpired() { + long notExpiredTime = MOCK_CURRENT_TIME - LEASE_DURATION_NANOS + 1; + Lease lease = createLease(null, "leaseKey", notExpiredTime); + Assert.assertTrue(lease.isAvailable(LEASE_DURATION_NANOS, MOCK_CURRENT_TIME)); + Assert.assertNull(lease.leaseOwner()); + } + + private Lease createLease(String leaseOwner, String leaseKey, long lastCounterIncrementNanos) { + final Lease lease = new Lease(); + lease.checkpoint(new ExtendedSequenceNumber("checkpoint")); + lease.ownerSwitchesSinceCheckpoint(0L); + lease.leaseCounter(0L); + lease.leaseOwner(leaseOwner); + lease.parentShardIds(Collections.singleton("parentShardId")); + lease.childShardIds(new HashSet<>()); + lease.leaseKey(leaseKey); + lease.lastCounterIncrementNanos(lastCounterIncrementNanos); + return lease; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java index 5147ba79..9caa9648 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java @@ -14,9 +14,6 @@ */ package software.amazon.kinesis.leases; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -24,9 +21,11 @@ import java.util.List; import java.util.Random; import org.junit.Test; - import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + public class ParentsFirstShardPrioritizationUnitTest { @Test(expected = IllegalArgumentException.class) @@ -169,8 +168,7 @@ public class ParentsFirstShardPrioritizationUnitTest { private List parentShardIds = Collections.emptyList(); private ExtendedSequenceNumber checkpoint = ExtendedSequenceNumber.LATEST; - ShardInfoBuilder() { - } + ShardInfoBuilder() {} ShardInfoBuilder withShardId(String shardId) { this.shardId = shardId; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java index 276f6c25..d61194fc 100644 ---
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java @@ -14,25 +14,22 @@ */ package software.amazon.kinesis.leases; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.UUID; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; - -import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + public class ShardInfoTest { private static final String CONCURRENCY_TOKEN = UUID.randomUUID().toString(); private static final String SHARD_ID = "shardId-test"; @@ -50,7 +47,8 @@ public class ShardInfoTest { @Test public void testPacboyShardInfoEqualsWithSameArgs() { - ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); + ShardInfo equalShardInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); assertTrue("Equal should return true for arguments all the same", testShardInfo.equals(equalShardInfo)); } @@ -61,9 +59,9 @@ public class ShardInfoTest { @Test public void testPacboyShardInfoEqualsForfToken() { - ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with different concurrency token", - diffShardInfo.equals(testShardInfo)); + ShardInfo diffShardInfo = + new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds, ExtendedSequenceNumber.LATEST); + assertFalse("Equal should return false with different concurrency token", diffShardInfo.equals(testShardInfo)); diffShardInfo = new ShardInfo(SHARD_ID, null, parentShardIds, ExtendedSequenceNumber.LATEST); assertFalse("Equal should return false for null concurrency token", diffShardInfo.equals(testShardInfo)); } @@ -73,9 +71,10 @@ public class ShardInfoTest { List differentlyOrderedParentShardIds = new ArrayList<>(); differentlyOrderedParentShardIds.add("shard-2"); differentlyOrderedParentShardIds.add("shard-1"); - ShardInfo shardInfoWithDifferentlyOrderedParentShardIds = - new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Equal should return true even with parent shard Ids reordered", + ShardInfo shardInfoWithDifferentlyOrderedParentShardIds = new ShardInfo( + SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds, ExtendedSequenceNumber.LATEST); + assertTrue( + "Equal should return true even with parent shard Ids reordered", shardInfoWithDifferentlyOrderedParentShardIds.equals(testShardInfo)); } @@ -84,34 +83,40 @@ public class ShardInfoTest { Set diffParentIds = new HashSet<>(); diffParentIds.add("shard-3"); diffParentIds.add("shard-4"); - ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with different parent shard Ids", 
- diffShardInfo.equals(testShardInfo)); + ShardInfo diffShardInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds, ExtendedSequenceNumber.LATEST); + assertFalse("Equal should return false with different parent shard Ids", diffShardInfo.equals(testShardInfo)); diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, null, ExtendedSequenceNumber.LATEST); assertFalse("Equal should return false with null parent shard Ids", diffShardInfo.equals(testShardInfo)); } @Test public void testShardInfoCheckpointEqualsHashCode() { - ShardInfo baseInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, - ExtendedSequenceNumber.TRIM_HORIZON); - ShardInfo differentCheckpoint = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, - new ExtendedSequenceNumber("1234")); + ShardInfo baseInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + ShardInfo differentCheckpoint = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, new ExtendedSequenceNumber("1234")); ShardInfo nullCheckpoint = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, null); assertThat("Checkpoint should not be included in equality.", baseInfo.equals(differentCheckpoint), is(true)); assertThat("Checkpoint should not be included in equality.", baseInfo.equals(nullCheckpoint), is(true)); - assertThat("Checkpoint should not be included in hash code.", baseInfo.hashCode(), + assertThat( + "Checkpoint should not be included in hash code.", + baseInfo.hashCode(), equalTo(differentCheckpoint.hashCode())); - assertThat("Checkpoint should not be included in hash code.", baseInfo.hashCode(), + assertThat( + "Checkpoint should not be included in hash code.", + baseInfo.hashCode(), equalTo(nullCheckpoint.hashCode())); } @Test public void testPacboyShardInfoSameHashCode() { - ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Shard info objects should have same hashCode for the same arguments", + ShardInfo equalShardInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); + assertTrue( + "Shard info objects should have same hashCode for the same arguments", equalShardInfo.hashCode() == testShardInfo.hashCode()); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java index ee2504d8..228e65df 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java @@ -18,7 +18,6 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.List; - import software.amazon.awssdk.services.kinesis.model.HashKeyRange; import software.amazon.awssdk.services.kinesis.model.SequenceNumberRange; import software.amazon.awssdk.services.kinesis.model.Shard; @@ -29,12 +28,13 @@ import software.amazon.awssdk.services.kinesis.model.Shard; public class ShardObjectHelper { private static final int EXPONENT = 128; - + /** * Max value of a sequence number (2^128 -1). Useful for defining sequence number range for a shard. 
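* (For concreteness: 2^128 - 1 = 340282366920938463463374607431768211455, a 39-digit value well beyond Long.MAX_VALUE (9223372036854775807), which is why the bound is derived via BigInteger and kept as a String.)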
*/ - static final String MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); - + static final String MAX_SEQUENCE_NUMBER = + new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); + /** * Min value of a sequence number (0). Useful for defining sequence number range for a shard. */ @@ -43,19 +43,18 @@ public class ShardObjectHelper { /** * Max value of a hash key (2^128 -1). Useful for defining hash key range for a shard. */ - public static final String MAX_HASH_KEY = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); - + public static final String MAX_HASH_KEY = + new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); + /** * Min value of a hash key (0). Useful for defining sequence number range for a shard. */ public static final String MIN_HASH_KEY = BigInteger.ZERO.toString(); /** - * + * */ - private ShardObjectHelper() { - } - + private ShardObjectHelper() {} /** Helper method to create a new shard object. * @param shardId @@ -64,11 +63,17 @@ public class ShardObjectHelper { * @param sequenceNumberRange * @return */ - static Shard newShard(String shardId, + static Shard newShard( + String shardId, String parentShardId, String adjacentParentShardId, SequenceNumberRange sequenceNumberRange) { - return newShard(shardId, parentShardId, adjacentParentShardId, sequenceNumberRange, HashKeyRange.builder().startingHashKey("1").endingHashKey("100").build()); + return newShard( + shardId, + parentShardId, + adjacentParentShardId, + sequenceNumberRange, + HashKeyRange.builder().startingHashKey("1").endingHashKey("100").build()); } /** Helper method to create a new shard object. @@ -79,12 +84,19 @@ public class ShardObjectHelper { * @param hashKeyRange * @return */ - public static Shard newShard(String shardId, - String parentShardId, - String adjacentParentShardId, - SequenceNumberRange sequenceNumberRange, - HashKeyRange hashKeyRange) { - return Shard.builder().shardId(shardId).parentShardId(parentShardId).adjacentParentShardId(adjacentParentShardId).sequenceNumberRange(sequenceNumberRange).hashKeyRange(hashKeyRange).build(); + public static Shard newShard( + String shardId, + String parentShardId, + String adjacentParentShardId, + SequenceNumberRange sequenceNumberRange, + HashKeyRange hashKeyRange) { + return Shard.builder() + .shardId(shardId) + .parentShardId(parentShardId) + .adjacentParentShardId(adjacentParentShardId) + .sequenceNumberRange(sequenceNumberRange) + .hashKeyRange(hashKeyRange) + .build(); } /** Helper method. @@ -92,8 +104,12 @@ public class ShardObjectHelper { * @param endingSequenceNumber * @return */ - public static SequenceNumberRange newSequenceNumberRange(String startingSequenceNumber, String endingSequenceNumber) { - return SequenceNumberRange.builder().startingSequenceNumber(startingSequenceNumber).endingSequenceNumber(endingSequenceNumber).build(); + public static SequenceNumberRange newSequenceNumberRange( + String startingSequenceNumber, String endingSequenceNumber) { + return SequenceNumberRange.builder() + .startingSequenceNumber(startingSequenceNumber) + .endingSequenceNumber(endingSequenceNumber) + .build(); } /** Helper method. 
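* (Note: the four-argument newShard overload above delegates to the five-argument one with a default HashKeyRange of "1".."100"; tests that assert on hash-key boundaries should pass an explicit range.)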
@@ -102,9 +118,12 @@ public class ShardObjectHelper { * @return */ public static HashKeyRange newHashKeyRange(String startingHashKey, String endingHashKey) { - return HashKeyRange.builder().startingHashKey(startingHashKey).endingHashKey(endingHashKey).build(); + return HashKeyRange.builder() + .startingHashKey(startingHashKey) + .endingHashKey(endingHashKey) + .build(); } - + static List getParentShardIds(Shard shard) { List parentShardIds = new ArrayList<>(2); if (shard.adjacentParentShardId() != null) { @@ -115,6 +134,4 @@ public class ShardObjectHelper { } return parentShardIds; } - - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java index d7a94266..3e69e244 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentSkipListSet; - import junit.framework.Assert; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.kinesis.model.Shard; @@ -46,14 +45,14 @@ public class ShardSequenceVerifier { shardIdToShards.put(shard.shardId(), shard); } } - + public void registerInitialization(String shardId) { List parentShardIds = ShardObjectHelper.getParentShardIds(shardIdToShards.get(shardId)); for (String parentShardId : parentShardIds) { if (initializedShards.contains(parentShardId)) { if (!shutdownShards.contains(parentShardId)) { - String message = "Parent shard " + parentShardId + " was not shutdown before shard " - + shardId + " was initialized."; + String message = "Parent shard " + parentShardId + " was not shutdown before shard " + shardId + + " was initialized."; log.error(message); validationFailures.add(message); } @@ -61,18 +60,17 @@ public class ShardSequenceVerifier { } initializedShards.add(shardId); } - + public void registerShutdown(String shardId, ShutdownReason reason) { if (reason.equals(ShutdownReason.SHARD_END)) { shutdownShards.add(shardId); } } - + public void verify() { for (String message : validationFailures) { log.error(message); } Assert.assertTrue(validationFailures.isEmpty()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java index ce6ce386..28915b16 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.leases; -//import java.net.URI; - import java.util.HashSet; import java.util.List; import java.util.Set; @@ -26,9 +24,6 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; - -//import software.amazon.awssdk.core.client.builder.ClientAsyncHttpConfiguration; -//import software.amazon.awssdk.http.nio.netty.NettySdkHttpClientFactory; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -67,43 +62,58 @@ public class ShardSyncTaskIntegrationTest { @BeforeClass public static void 
setUpBeforeClass() throws Exception { -// ClientAsyncHttpConfiguration configuration = ClientAsyncHttpConfiguration.builder().httpClientFactory( -// NettySdkHttpClientFactory.builder().trustAllCertificates(true).maxConnectionsPerEndpoint(10).build()) -// .build(); -// kinesisClient = KinesisAsyncClient.builder().asyncHttpConfiguration(configuration) -// .endpointOverride(new URI("https://aws-kinesis-alpha.corp.amazon.com")).region(Region.US_EAST_1) -// .build(); -// + // ClientAsyncHttpConfiguration configuration = ClientAsyncHttpConfiguration.builder().httpClientFactory( + // + // NettySdkHttpClientFactory.builder().trustAllCertificates(true).maxConnectionsPerEndpoint(10).build()) + // .build(); + // kinesisClient = KinesisAsyncClient.builder().asyncHttpConfiguration(configuration) + // .endpointOverride(new + // URI("https://aws-kinesis-alpha.corp.amazon.com")).region(Region.US_EAST_1) + // .build(); + // try { - CreateStreamRequest req = CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(1).build(); + CreateStreamRequest req = CreateStreamRequest.builder() + .streamName(STREAM_NAME) + .shardCount(1) + .build(); kinesisClient.createStream(req); } catch (KinesisException ase) { ase.printStackTrace(); } StreamStatus status; -// do { -// status = StreamStatus.fromValue(kinesisClient.describeStreamSummary( -// DescribeStreamSummaryRequest.builder().streamName(STREAM_NAME).build()).get() -// .streamDescriptionSummary().streamStatusString()); -// } while (status != StreamStatus.ACTIVE); -// + // do { + // status = StreamStatus.fromValue(kinesisClient.describeStreamSummary( + // DescribeStreamSummaryRequest.builder().streamName(STREAM_NAME).build()).get() + // .streamDescriptionSummary().streamStatusString()); + // } while (status != StreamStatus.ACTIVE); + // } @Before public void setup() { - DynamoDbAsyncClient client = DynamoDbAsyncClient.builder().region(Region.US_EAST_1).build(); - leaseRefresher = - new DynamoDBLeaseRefresher("ShardSyncTaskIntegrationTest", client, new DynamoDBLeaseSerializer(), - USE_CONSISTENT_READS, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + DynamoDbAsyncClient client = + DynamoDbAsyncClient.builder().region(Region.US_EAST_1).build(); + leaseRefresher = new DynamoDBLeaseRefresher( + "ShardSyncTaskIntegrationTest", + client, + new DynamoDBLeaseSerializer(), + USE_CONSISTENT_READS, + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); - shardDetector = new KinesisShardDetector(kinesisClient, STREAM_NAME, 500L, 50, - LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, MAX_CACHE_MISSES_BEFORE_RELOAD, CACHE_MISS_WARNING_MODULUS); + shardDetector = new KinesisShardDetector( + kinesisClient, + STREAM_NAME, + 500L, + 50, + LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, + MAX_CACHE_MISSES_BEFORE_RELOAD, + CACHE_MISS_WARNING_MODULUS); hierarchicalShardSyncer = new HierarchicalShardSyncer(); } /** * Test method for call(). 
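* (That is, ShardSyncTask#call(): after the sync runs, the assertions below expect every shard id returned by the shard detector to be covered by a lease key.)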
- * + * * @throws DependencyException * @throws InvalidStateException * @throws ProvisionedThroughputException @@ -116,11 +126,18 @@ public class ShardSyncTaskIntegrationTest { leaseRefresher.createLeaseTableIfNotExists(readCapacity, writeCapacity); } leaseRefresher.deleteAll(); - Set shardIds = shardDetector.listShards().stream().map(Shard::shardId).collect(Collectors.toSet()); - ShardSyncTask syncTask = new ShardSyncTask(shardDetector, leaseRefresher, + Set shardIds = + shardDetector.listShards().stream().map(Shard::shardId).collect(Collectors.toSet()); + ShardSyncTask syncTask = new ShardSyncTask( + shardDetector, + leaseRefresher, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST), - false, true, false, 0L, - hierarchicalShardSyncer, NULL_METRICS_FACTORY); + false, + true, + false, + 0L, + hierarchicalShardSyncer, + NULL_METRICS_FACTORY); syncTask.call(); List leases = leaseRefresher.listLeases(); Set leaseKeys = new HashSet<>(); @@ -133,5 +150,4 @@ public class ShardSyncTaskIntegrationTest { shardIds.removeAll(leaseKeys); Assert.assertTrue(shardIds.isEmpty()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java index 63cb97e8..1523fe38 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java @@ -14,10 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.UUID; import org.junit.Before; @@ -25,7 +21,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointer; import software.amazon.kinesis.exceptions.KinesisClientLibException; import software.amazon.kinesis.exceptions.ShutdownException; @@ -37,6 +32,10 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBCheckpointerTest { private static final String SHARD_ID = "shardId-test"; @@ -46,6 +45,7 @@ public class DynamoDBCheckpointerTest { @Mock private LeaseRefresher leaseRefresher; + @Mock private LeaseCoordinator leaseCoordinator; @@ -58,11 +58,13 @@ public class DynamoDBCheckpointerTest { } @Test(expected = ShutdownException.class) - public void testSetCheckpointWithUnownedShardId() throws KinesisClientLibException, DependencyException, - InvalidStateException, ProvisionedThroughputException { + public void testSetCheckpointWithUnownedShardId() + throws KinesisClientLibException, DependencyException, InvalidStateException, + ProvisionedThroughputException { final Lease lease = new Lease(); when(leaseCoordinator.getCurrentlyHeldLease(eq(SHARD_ID))).thenReturn(lease); - when(leaseCoordinator.updateLease(eq(lease), eq(TEST_UUID), eq(OPERATION), eq(SHARD_ID))).thenReturn(false); + when(leaseCoordinator.updateLease(eq(lease), 
eq(TEST_UUID), eq(OPERATION), eq(SHARD_ID))) + .thenReturn(false); try { dynamoDBCheckpointer.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID.toString()); } finally { @@ -71,11 +73,11 @@ public class DynamoDBCheckpointerTest { } } -// @Test(expected = DependencyException.class) -// public void testWaitLeaseTableTimeout() -// throws DependencyException, ProvisionedThroughputException, IllegalStateException { -// Set mock lease manager to return false in waiting -// doReturn(false).when(leaseRefresher).waitUntilLeaseTableExists(anyLong(), anyLong()); -// leaseCoordinator.initialize(); -// } + // @Test(expected = DependencyException.class) + // public void testWaitLeaseTableTimeout() + // throws DependencyException, ProvisionedThroughputException, IllegalStateException { + // Set mock lease manager to return false in waiting + // doReturn(false).when(leaseRefresher).waitUntilLeaseTableExists(anyLong(), anyLong()); + // leaseCoordinator.initialize(); + // } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java index d89c010e..f52b91e1 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java @@ -14,15 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -33,7 +24,6 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointer; @@ -47,6 +37,14 @@ import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseCoordinatorIntegrationTest { private static final int ATTEMPTS = 20; @@ -74,9 +72,14 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { final boolean useConsistentReads = true; if (leaseRefresher == null) { DynamoDbAsyncClient dynamoDBClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDBClient, new DynamoDBLeaseSerializer(), - useConsistentReads, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); + 
leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDBClient, + new DynamoDBLeaseSerializer(), + useConsistentReads, + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); } leaseRefresher.createLeaseTableIfNotExists(10L, 10L); @@ -97,9 +100,17 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { } leaseRefresher.deleteAll(); - coordinator = new DynamoDBLeaseCoordinator(leaseRefresher, WORKER_ID, LEASE_DURATION_MILLIS, - EPSILON_MILLIS, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, MAX_LEASE_RENEWER_THREAD_COUNT, - INITIAL_LEASE_TABLE_READ_CAPACITY, INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + coordinator = new DynamoDBLeaseCoordinator( + leaseRefresher, + WORKER_ID, + LEASE_DURATION_MILLIS, + EPSILON_MILLIS, + MAX_LEASES_FOR_WORKER, + MAX_LEASES_TO_STEAL_AT_ONE_TIME, + MAX_LEASE_RENEWER_THREAD_COUNT, + INITIAL_LEASE_TABLE_READ_CAPACITY, + INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); dynamoDBCheckpointer = new DynamoDBCheckpointer(coordinator, leaseRefresher); dynamoDBCheckpointer.operation(OPERATION); @@ -141,7 +152,8 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { lease.leaseOwner(coordinator.workerIdentifier()); assertEquals(lease, leaseFromDDBAtInitialCheckpoint); - dynamoDBCheckpointer.prepareCheckpoint(lease.leaseKey(), pendingCheckpoint, lease.concurrencyToken().toString(), checkpointState); + dynamoDBCheckpointer.prepareCheckpoint( + lease.leaseKey(), pendingCheckpoint, lease.concurrencyToken().toString(), checkpointState); final Lease leaseFromDDBAtPendingCheckpoint = leaseRefresher.getLease(lease.leaseKey()); lease.leaseCounter(lease.leaseCounter() + 1); @@ -258,5 +270,4 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { return leases; } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java index caa7a6c7..2b9ffbcd 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java @@ -1,6 +1,7 @@ package software.amazon.kinesis.leases.dynamodb; -import org.junit.Assert; +import java.util.UUID; + import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -10,9 +11,7 @@ import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.metrics.MetricsFactory; -import java.util.UUID; - -import static org.mockito.Mockito.times; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -20,6 +19,7 @@ import static org.mockito.Mockito.when; public class DynamoDBLeaseCoordinatorTest { private static final String WORKER_ID = UUID.randomUUID().toString(); + private static final boolean ENABLE_PRIORITY_LEASE_ASSIGNMENT = true; private static final long LEASE_DURATION_MILLIS = 5000L; private static final long EPSILON_MILLIS = 25L; private static final int MAX_LEASES_FOR_WORKER = Integer.MAX_VALUE; @@ -32,6 +32,7 @@ public class DynamoDBLeaseCoordinatorTest { @Mock private LeaseRefresher leaseRefresher; + @Mock private MetricsFactory metricsFactory; @@ -39,29 +40,56 @@ public class DynamoDBLeaseCoordinatorTest { @Before public void setup() { - this.leaseCoordinator = new 
DynamoDBLeaseCoordinator(leaseRefresher, WORKER_ID, LEASE_DURATION_MILLIS, - EPSILON_MILLIS, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, MAX_LEASE_RENEWER_THREAD_COUNT, - INITIAL_LEASE_TABLE_READ_CAPACITY, INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + this.leaseCoordinator = new DynamoDBLeaseCoordinator( + leaseRefresher, + WORKER_ID, + LEASE_DURATION_MILLIS, + ENABLE_PRIORITY_LEASE_ASSIGNMENT, + EPSILON_MILLIS, + MAX_LEASES_FOR_WORKER, + MAX_LEASES_TO_STEAL_AT_ONE_TIME, + MAX_LEASE_RENEWER_THREAD_COUNT, + INITIAL_LEASE_TABLE_READ_CAPACITY, + INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); } @Test public void testInitialize_tableCreationSucceeds() throws Exception { when(leaseRefresher.createLeaseTableIfNotExists()).thenReturn(true); - when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)).thenReturn(true); + when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)) + .thenReturn(true); leaseCoordinator.initialize(); - verify(leaseRefresher, times(1)).createLeaseTableIfNotExists(); - verify(leaseRefresher, times(1)).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS); + verify(leaseRefresher).createLeaseTableIfNotExists(); + verify(leaseRefresher).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS); } - @Test + @Test(expected = DependencyException.class) public void testInitialize_tableCreationFails() throws Exception { when(leaseRefresher.createLeaseTableIfNotExists()).thenReturn(false); - when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)).thenReturn(false); + when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)) + .thenReturn(false); - Assert.assertThrows(DependencyException.class, () -> leaseCoordinator.initialize()); - verify(leaseRefresher, times(1)).createLeaseTableIfNotExists(); - verify(leaseRefresher, times(1)).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS); + try { + leaseCoordinator.initialize(); + } finally { + verify(leaseRefresher).createLeaseTableIfNotExists(); + verify(leaseRefresher).waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS); + } + } + + /** + * Validates a {@link NullPointerException} is not thrown when the lease taker + * is stopped before it starts/exists. 
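+ * (Presumably the NPE stemmed from stopLeaseTaker() touching taker state that is only initialized by start(), so stopping first should be a safe no-op.)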
+ * + * @see issue #745 + * @see issue #900 + */ + @Test + public void testStopLeaseTakerBeforeStart() { + leaseCoordinator.stopLeaseTaker(); + assertTrue(leaseCoordinator.getAssignments().isEmpty()); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java index 1b2fa78a..21a7a44f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java @@ -17,7 +17,9 @@ package software.amazon.kinesis.leases.dynamodb; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -29,12 +31,12 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseIntegrationTest; import software.amazon.kinesis.leases.UpdateField; import software.amazon.kinesis.leases.exceptions.LeasingException; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.verify; @@ -44,8 +46,12 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest @Before public void setup() { - doNothing().when(tableCreatorCallback).performAction( - eq(TableCreatorCallbackInput.builder().dynamoDbClient(ddbClient).tableName(tableName).build())); + doNothing() + .when(tableCreatorCallback) + .performAction(eq(TableCreatorCallbackInput.builder() + .dynamoDbClient(ddbClient) + .tableName(tableName) + .build())); } /** @@ -125,10 +131,8 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); Lease lease = builder.withLease("1").build().get("1"); final String leaseKey = lease.leaseKey(); - final HashKeyRangeForLease hashKeyRangeForLease = HashKeyRangeForLease.fromHashKeyRange(HashKeyRange.builder() - .startingHashKey("1") - .endingHashKey("2") - .build()); + final HashKeyRangeForLease hashKeyRangeForLease = HashKeyRangeForLease.fromHashKeyRange( + HashKeyRange.builder().startingHashKey("1").endingHashKey("2").build()); lease.hashKeyRange(hashKeyRangeForLease); leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE); final Lease updatedLease = leaseRefresher.getLease(leaseKey); @@ -186,7 +190,8 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest private void testTakeLease(boolean owned) throws LeasingException { TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1", owned ? "originalOwner" : null).build().get("1"); + Lease lease = + builder.withLease("1", owned ? 
"originalOwner" : null).build().get("1"); Long originalLeaseCounter = lease.leaseCounter(); String newOwner = "newOwner"; @@ -299,17 +304,16 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest @Test public void testWaitUntilLeaseTableExists() throws LeasingException { - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher("nagl_ShardProgress", ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { - @Override - long sleep(long timeToSleepMillis) { - fail("Should not sleep"); - return 0L; - } + final UUID uniqueId = UUID.randomUUID(); + DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher( + "tableEventuallyExists_" + uniqueId, + ddbClient, + new DynamoDBLeaseSerializer(), + true, + tableCreatorCallback); - }; - - assertTrue(refresher.waitUntilLeaseTableExists(1, 1)); + refresher.createLeaseTableIfNotExists(); + assertTrue(refresher.waitUntilLeaseTableExists(1, 20)); } @Test @@ -318,16 +322,16 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest * Just using AtomicInteger for the indirection it provides. */ final AtomicInteger sleepCounter = new AtomicInteger(0); - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher("nonexistentTable", ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { - @Override - long sleep(long timeToSleepMillis) { - assertEquals(1000L, timeToSleepMillis); - sleepCounter.incrementAndGet(); - return 1000L; - } - - }; + DynamoDBLeaseRefresher refresher = + new DynamoDBLeaseRefresher( + "nonexistentTable", ddbClient, new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { + @Override + long sleep(long timeToSleepMillis) { + assertEquals(1000L, timeToSleepMillis); + sleepCounter.incrementAndGet(); + return 1000L; + } + }; assertFalse(refresher.waitUntilLeaseTableExists(2, 1)); assertEquals(1, sleepCounter.get()); @@ -335,12 +339,15 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest @Test public void testTableCreatorCallback() throws Exception { - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher(tableName, ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback); + DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher( + tableName, ddbClient, new DynamoDBLeaseSerializer(), true, tableCreatorCallback); refresher.performPostTableCreationAction(); - verify(tableCreatorCallback).performAction( - eq(TableCreatorCallbackInput.builder().dynamoDbClient(ddbClient).tableName(tableName).build())); + verify(tableCreatorCallback) + .performAction(eq(TableCreatorCallbackInput.builder() + .dynamoDbClient(ddbClient) + .tableName(tableName) + .build())); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java index ac814d75..2668918c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java @@ -14,18 +14,7 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static 
org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -42,9 +31,10 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - +import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.BillingMode; import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; @@ -64,42 +54,73 @@ import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; import software.amazon.awssdk.services.dynamodb.model.TableDescription; import software.amazon.awssdk.services.dynamodb.model.TableStatus; +import software.amazon.awssdk.services.dynamodb.model.Tag; +import software.amazon.awssdk.services.dynamodb.model.UpdateContinuousBackupsRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateContinuousBackupsResponse; import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; -import software.amazon.awssdk.services.dynamodb.model.BillingMode; - import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseManagementConfig; import software.amazon.kinesis.leases.LeaseSerializer; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseRefresherTest { private static final String TABLE_NAME = "test"; private static final boolean CONSISTENT_READS = true; + private static final boolean DELETION_PROTECTION_ENABLED = false; + private static final boolean PITR_ENABLED = true; + private static final Collection EMPTY_TAGS = DefaultSdkAutoConstructList.getInstance(); + private static final Collection TAGS = + Collections.singletonList(Tag.builder().key("foo").value("bar").build()); @Mock private DynamoDbAsyncClient dynamoDbClient; + @Mock private LeaseSerializer leaseSerializer; + @Mock private TableCreatorCallback tableCreatorCallback; + @Mock private CompletableFuture mockScanFuture; + @Mock private CompletableFuture mockPutItemFuture; + @Mock private CompletableFuture mockGetItemFuture; + @Mock private CompletableFuture mockUpdateFuture; + @Mock private CompletableFuture mockDeleteFuture; + @Mock private CompletableFuture mockDescribeTableFuture; + @Mock private CompletableFuture 
mockCreateTableFuture; + + @Mock + private CompletableFuture mockUpdateContinuousBackupsFuture; + @Mock private Lease lease; @@ -109,21 +130,27 @@ public class DynamoDBLeaseRefresherTest { private DynamoDBLeaseRefresher leaseRefresher; private DescribeTableRequest describeTableRequest; private CreateTableRequest createTableRequest; - + private UpdateContinuousBackupsRequest updateContinuousBackupsRequest; private Map serializedLease; @Before public void setup() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, tableCreatorCallback); serializedLease = new HashMap<>(); - describeTableRequest = DescribeTableRequest.builder().tableName(TABLE_NAME).build(); + describeTableRequest = + DescribeTableRequest.builder().tableName(TABLE_NAME).build(); createTableRequest = CreateTableRequest.builder() .tableName(TABLE_NAME) .keySchema(leaseSerializer.getKeySchema()) .attributeDefinitions(leaseSerializer.getAttributeDefinitions()) .billingMode(BillingMode.PAY_PER_REQUEST) + .deletionProtectionEnabled(DELETION_PROTECTION_ENABLED) + .build(); + updateContinuousBackupsRequest = UpdateContinuousBackupsRequest.builder() + .tableName(TABLE_NAME) + .pointInTimeRecoverySpecification(builder -> builder.pointInTimeRecoveryEnabled(PITR_ENABLED)) .build(); } @@ -147,14 +174,15 @@ public class DynamoDBLeaseRefresherTest { lastEvaluatedKey.put("Test", AttributeValue.builder().s("test").build()); when(mockScanFuture.get(anyLong(), any(TimeUnit.class))) - .thenReturn(ScanResponse.builder().lastEvaluatedKey(lastEvaluatedKey).build()) + .thenReturn(ScanResponse.builder() + .lastEvaluatedKey(lastEvaluatedKey) + .build()) .thenThrow(te); verifyCancel(mockScanFuture, () -> leaseRefresher.listLeases()); verify(mockScanFuture, times(2)).get(anyLong(), any(TimeUnit.class)); verify(dynamoDbClient, times(2)).scan(any(ScanRequest.class)); - } @Test @@ -175,7 +203,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.UPDATING).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.UPDATING) + .build()) .build()); assertTrue(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -185,7 +215,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.ACTIVE).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.ACTIVE) + .build()) .build()); assertTrue(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -195,7 +227,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.CREATING).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.CREATING) + .build()) .build()); assertFalse(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ 
-205,7 +239,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.DELETING).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.DELETING) + .build()) .build()); assertFalse(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -225,7 +261,7 @@ public class DynamoDBLeaseRefresherTest { @Test public void testRenewLeaseTimesOut() throws Exception { setupUpdateItemTest(); - verifyCancel(mockUpdateFuture, () ->leaseRefresher.renewLease(lease)); + verifyCancel(mockUpdateFuture, () -> leaseRefresher.renewLease(lease)); } @Test @@ -250,7 +286,8 @@ public class DynamoDBLeaseRefresherTest { public void testDeleteAllLeasesTimesOut() throws Exception { TimeoutException te = setRuleForDependencyTimeout(); when(dynamoDbClient.scan(any(ScanRequest.class))).thenReturn(mockScanFuture); - when(mockScanFuture.get(anyLong(), any())).thenReturn(ScanResponse.builder().items(Collections.emptyMap()).build()); + when(mockScanFuture.get(anyLong(), any())) + .thenReturn(ScanResponse.builder().items(Collections.emptyMap()).build()); when(leaseSerializer.fromDynamoRecord(any())).thenReturn(lease); when(leaseSerializer.getDynamoHashKey(any(Lease.class))).thenReturn(Collections.emptyMap()); @@ -283,23 +320,37 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableProvisionedBillingModeIfNotExists() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PROVISIONED); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + DELETION_PROTECTION_ENABLED); when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); - final ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(10L) - .writeCapacityUnits(10L).build(); + final ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(10L) + .writeCapacityUnits(10L) + .build(); final CreateTableRequest createTableRequest = CreateTableRequest.builder() .tableName(TABLE_NAME) .keySchema(leaseSerializer.getKeySchema()) .attributeDefinitions(leaseSerializer.getAttributeDefinitions()) .provisionedThroughput(throughput) + .deletionProtectionEnabled(DELETION_PROTECTION_ENABLED) .build(); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenReturn(null); final boolean 
result = leaseRefresher.createLeaseTableIfNotExists(10L, 10L); @@ -313,13 +364,64 @@ public class DynamoDBLeaseRefresherTest { Assert.assertTrue(result); } + @Test + public void testCreateLeaseTableWithTagsIfNotExists() throws Exception { + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + DELETION_PROTECTION_ENABLED, + TAGS); + + when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); + when(mockDescribeTableFuture.get( + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); + + final ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(10L) + .writeCapacityUnits(10L) + .build(); + final CreateTableRequest createTableRequest = CreateTableRequest.builder() + .tableName(TABLE_NAME) + .keySchema(leaseSerializer.getKeySchema()) + .attributeDefinitions(leaseSerializer.getAttributeDefinitions()) + .provisionedThroughput(throughput) + .deletionProtectionEnabled(DELETION_PROTECTION_ENABLED) + .tags(TAGS) + .build(); + when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); + when(mockCreateTableFuture.get(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)) + .thenReturn(null); + + final boolean result = leaseRefresher.createLeaseTableIfNotExists(10L, 10L); + + verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); + verify(dynamoDbClient, times(1)).createTable(createTableRequest); + verify(mockDescribeTableFuture, times(1)) + .get(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + verify(mockCreateTableFuture, times(1)) + .get(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + Assert.assertTrue(result); + } + @Test public void testCreateLeaseTableIfNotExists() throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenReturn(null); final boolean result = leaseRefresher.createLeaseTableIfNotExists(); @@ -333,14 +435,104 @@ public class DynamoDBLeaseRefresherTest { Assert.assertTrue(result); } + @Test + public void testCreateLeaseTableIfNotExistsWithPitrEnabled() throws Exception { + DynamoDBLeaseRefresher leaseRefresherWithEnabledPitr = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PAY_PER_REQUEST, + DELETION_PROTECTION_ENABLED, + 
PITR_ENABLED, + EMPTY_TAGS); + when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); + when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenReturn(null); + when(dynamoDbClient.updateContinuousBackups(updateContinuousBackupsRequest)) + .thenReturn(mockUpdateContinuousBackupsFuture); + when(mockUpdateContinuousBackupsFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenReturn(null); + final boolean result = leaseRefresherWithEnabledPitr.createLeaseTableIfNotExists(); + + verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); + verify(dynamoDbClient, times(1)).createTable(createTableRequest); + verify(dynamoDbClient, times(1)).updateContinuousBackups(updateContinuousBackupsRequest); + verify(mockDescribeTableFuture, times(1)) + .get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS)); + verify(mockCreateTableFuture, times(1)) + .get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS)); + Assert.assertTrue(result); + } + + @Test + public void testCreateLeaseTableProvisionedWithDeletionProtectionIfNotExists() throws Exception { + DynamoDBLeaseRefresher leaseRefresherWithEnabledDeletionProtection = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + true); + + when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); + + final ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(10L) + .writeCapacityUnits(10L) + .build(); + final CreateTableRequest createTableRequest = CreateTableRequest.builder() + .tableName(TABLE_NAME) + .keySchema(leaseSerializer.getKeySchema()) + .attributeDefinitions(leaseSerializer.getAttributeDefinitions()) + .provisionedThroughput(throughput) + .deletionProtectionEnabled(true) + .build(); + when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenReturn(null); + + final boolean result = leaseRefresherWithEnabledDeletionProtection.createLeaseTableIfNotExists(10L, 10L); + + verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); + verify(dynamoDbClient, times(1)).createTable(createTableRequest); + verify(mockDescribeTableFuture, times(1)) + .get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS)); + verify(mockCreateTableFuture, times(1)) + .get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS)); + Assert.assertTrue(result); + } + @Test public void testCreateLeaseTableIfNotExists_throwsDependencyException() throws 
Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(new InterruptedException()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceInUseException.builder().message("Table already exists").build()); + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceInUseException.builder() + .message("Table already exists") + .build()); Assert.assertFalse(leaseRefresher.createLeaseTableIfNotExists()); verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); @@ -352,13 +544,20 @@ public class DynamoDBLeaseRefresherTest { } @Test - public void testCreateLeaseTableIfNotExists_tableAlreadyExists_throwsResourceInUseException_expectFalse() throws Exception { + public void testCreateLeaseTableIfNotExists_tableAlreadyExists_throwsResourceInUseException_expectFalse() + throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceInUseException.builder().message("Table already exists").build()); + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceInUseException.builder() + .message("Table already exists") + .build()); Assert.assertFalse(leaseRefresher.createLeaseTableIfNotExists()); verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); @@ -370,12 +569,17 @@ public class DynamoDBLeaseRefresherTest { } @Test - public void testCreateLeaseTableIfNotExists_throwsLimitExceededException_expectProvisionedThroughputException() throws Exception { + public void testCreateLeaseTableIfNotExists_throwsLimitExceededException_expectProvisionedThroughputException() + throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - 
when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(LimitExceededException.builder().build()); Assert.assertThrows(ProvisionedThroughputException.class, () -> leaseRefresher.createLeaseTableIfNotExists()); @@ -390,10 +594,14 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableIfNotExists_throwsDynamoDbException_expectDependencyException() throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(DynamoDbException.builder().build()); Assert.assertThrows(DependencyException.class, () -> leaseRefresher.createLeaseTableIfNotExists()); @@ -408,10 +616,14 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableIfNotExists_throwsTimeoutException_expectDependencyException() throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(new TimeoutException()); Assert.assertThrows(DependencyException.class, () -> leaseRefresher.createLeaseTableIfNotExists()); @@ -425,13 +637,22 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableProvisionedBillingModeTimesOut() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PROVISIONED); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + false); TimeoutException te = setRuleForDependencyTimeout(); when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); 
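// The failure-path tests in this class share one stubbing pattern: the mocked async client
// returns a mocked future, and the failure is thrown from future.get(timeout, unit) rather than
// from the client call itself. A minimal sketch of the pattern, using only names that appear in
// this class:
//
//     when(dynamoDbClient.createTable(any(CreateTableRequest.class))).thenReturn(mockCreateTableFuture);
//     when(mockCreateTableFuture.get(anyLong(), any())).thenThrow(new TimeoutException());
//     Assert.assertThrows(DependencyException.class, () -> leaseRefresher.createLeaseTableIfNotExists());
//
// As the surrounding tests assert, a TimeoutException or DynamoDbException from the future is
// surfaced as DependencyException, while LimitExceededException becomes ProvisionedThroughputException.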
when(mockDescribeTableFuture.get(anyLong(), any())) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(any(CreateTableRequest.class))).thenReturn(mockCreateTableFuture); when(mockCreateTableFuture.get(anyLong(), any())).thenThrow(te); @@ -445,7 +666,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(any(CreateTableRequest.class))).thenReturn(mockCreateTableFuture); when(mockCreateTableFuture.get(anyLong(), any())).thenThrow(te); @@ -485,5 +708,4 @@ public class DynamoDBLeaseRefresherTest { return te; } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java index 1dad013e..dd6a17a2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java @@ -14,6 +14,10 @@ */ package software.amazon.kinesis.leases.dynamodb; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.Executors; + import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -25,19 +29,15 @@ import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.Executors; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; import static org.junit.Assert.assertThat; @RunWith(MockitoJUnitRunner.class) -public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends - LeaseIntegrationBillingModePayPerRequestTest { - private final String TEST_METRIC = "TestOperation"; +public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest + extends LeaseIntegrationBillingModePayPerRequestTest { + private static final String TEST_METRIC = "TestOperation"; // This test case's leases last 2 seconds private static final long LEASE_DURATION_MILLIS = 2000L; @@ -46,8 +46,12 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends @Before public void setup() { - renewer = new DynamoDBLeaseRenewer(leaseRefresher, "foo", LEASE_DURATION_MILLIS, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + renewer = new DynamoDBLeaseRenewer( + leaseRefresher, + "foo", + LEASE_DURATION_MILLIS, + Executors.newCachedThreadPool(), + new NullMetricsFactory()); } @Test @@ -111,7 +115,7 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends builder.withLease("1", "foo").withLease("2", 
"foo").build(); builder.addLeasesToRenew(renewer, "1", "2"); - Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); + Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); // This should be a copy that doesn't get updated Map heldLeases = renewer.getCurrentlyHeldLeases(); @@ -141,7 +145,8 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends Lease expected = renewer.getCurrentlyHeldLease("1"); expected.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), + assertThat( + renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), equalTo(true)); // Assert that the counter and data have changed immediately after the update... @@ -256,8 +261,8 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); @@ -271,8 +276,8 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java index 7c884fd6..5abd3a4b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java @@ -14,18 +14,12 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; - import java.util.Collections; import java.util.Map; import java.util.concurrent.Executors; import org.junit.Before; import org.junit.Test; - import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.kinesis.leases.Lease; @@ -34,9 +28,15 @@ import software.amazon.kinesis.leases.LeaseRenewer; import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + +import static 
org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { - private final String TEST_METRIC = "TestOperation"; + private static final String TEST_METRIC = "TestOperation"; // This test case's leases last 2 seconds private static final long LEASE_DURATION_MILLIS = 2000L; @@ -45,8 +45,12 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { @Before public void setup() { - renewer = new DynamoDBLeaseRenewer(leaseRefresher, "foo", LEASE_DURATION_MILLIS, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + renewer = new DynamoDBLeaseRenewer( + leaseRefresher, + "foo", + LEASE_DURATION_MILLIS, + Executors.newCachedThreadPool(), + new NullMetricsFactory()); } @Test @@ -110,7 +114,7 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { builder.withLease("1", "foo").withLease("2", "foo").build(); builder.addLeasesToRenew(renewer, "1", "2"); - Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); + Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); // This should be a copy that doesn't get updated Map heldLeases = renewer.getCurrentlyHeldLeases(); @@ -140,7 +144,8 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { Lease expected = renewer.getCurrentlyHeldLease("1"); expected.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), + assertThat( + renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), equalTo(true)); // Assert that the counter and data have changed immediately after the update... 
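// The assertions that follow pin down the renewer's update contract. A minimal sketch of the flow
// under test, using only calls that appear in this class:
//
//     Lease held = renewer.getCurrentlyHeldLease("1");
//     held.checkpoint(new ExtendedSequenceNumber("new checkpoint"));
//     boolean updated = renewer.updateLease(held, held.concurrencyToken(), TEST_METRIC, held.leaseKey());
//
// On success the renewer applies the new checkpoint (and bumped lease counter) to its own copy,
// so the change is visible immediately through a subsequent getCurrentlyHeldLease("1").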
@@ -255,8 +260,8 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); @@ -270,8 +275,8 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java index bfff4e92..16a443c1 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java @@ -14,15 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -36,7 +27,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.kinesis.common.HashKeyRangeForLease; import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseRefresher; @@ -44,6 +34,18 @@ import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.metrics.NullMetricsFactory; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseRenewerTest { @@ -56,15 +58,30 @@ public class 
DynamoDBLeaseRenewerTest { private LeaseRefresher leaseRefresher; private static Lease newLease(String leaseKey) { - return new Lease(leaseKey, "LeaseOwner", 0L, UUID.randomUUID(), System.nanoTime(), null, null, null, - new HashSet<>(), new HashSet<>(), null, HashKeyRangeForLease.deserialize("1", "2")); + return new Lease( + leaseKey, + "LeaseOwner", + 0L, + UUID.randomUUID(), + System.nanoTime(), + null, + null, + 1L, + new HashSet<>(), + new HashSet<>(), + null, + HashKeyRangeForLease.deserialize("1", "2")); } @Before public void before() { leasesToRenew = null; - renewer = new DynamoDBLeaseRenewer(leaseRefresher, workerIdentifier, leaseDurationMillis, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + renewer = new DynamoDBLeaseRenewer( + leaseRefresher, + workerIdentifier, + leaseDurationMillis, + Executors.newCachedThreadPool(), + new NullMetricsFactory()); } @After @@ -79,14 +96,14 @@ public class DynamoDBLeaseRenewerTest { @Test public void testLeaseRenewerHoldsGoodLeases() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { /* * Prepare leases to be renewed * 2 Good */ Lease lease1 = newLease("1"); Lease lease2 = newLease("2"); - leasesToRenew = Arrays.asList(lease1,lease2); + leasesToRenew = Arrays.asList(lease1, lease2); renewer.addLeasesToRenew(leasesToRenew); doReturn(true).when(leaseRefresher).renewLease(lease1); @@ -98,7 +115,8 @@ public class DynamoDBLeaseRenewerTest { } @Test - public void testLeaseRenewerDoesNotRenewExpiredLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public void testLeaseRenewerDoesNotRenewExpiredLease() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { String leaseKey = "expiredLease"; long initialCounterIncrementNanos = 5L; // "expired" time. Lease lease1 = newLease(leaseKey); @@ -119,4 +137,35 @@ public class DynamoDBLeaseRenewerTest { // Clear the list to avoid triggering expectation mismatch in after(). 
leasesToRenew.clear(); } + + @Test + public void testLeaseRenewerDoesNotUpdateInMemoryLeaseIfDDBFailsUpdate() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + String leaseKey = "leaseToUpdate"; + Lease lease = newLease(leaseKey); + lease.checkpoint(ExtendedSequenceNumber.LATEST); + leasesToRenew = new ArrayList<>(); + leasesToRenew.add(lease); + renewer.addLeasesToRenew(leasesToRenew); + + doReturn(true).when(leaseRefresher).renewLease(lease); + renewer.renewLeases(); + + Lease updatedLease = newLease(leaseKey); + updatedLease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + + doThrow(new DependencyException(new RuntimeException())) + .when(leaseRefresher) + .updateLease(updatedLease); + + try { + UUID concurrencyToken = renewer.getCurrentlyHeldLease(leaseKey).concurrencyToken(); + renewer.updateLease(updatedLease, concurrencyToken, "test", "dummyShardId"); + fail(); + } catch (DependencyException e) { + // expected + } + assertEquals(0L, (long) lease.leaseCounter()); // leaseCounter should not be incremented due to DDB failure + assertEquals(ExtendedSequenceNumber.LATEST, lease.checkpoint()); + } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java index 475f1940..6b86a5e7 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java @@ -15,9 +15,8 @@ package software.amazon.kinesis.leases.dynamodb; import java.util.Collection; -import java.util.List; import java.util.Map; -import java.util.stream.Collectors; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -27,6 +26,7 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseIntegrationTest; import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.metrics.NullMetricsFactory; + import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -104,7 +104,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { builder.withLease("4", "bar").build(); // setting multiplier to unusually high number to avoid very old lease taking - taker.withVeryOldLeaseDurationNanosMultipler(5000000000L); + taker.withVeryOldLeaseDurationNanosMultiplier(5000000); builder.takeMutateAssert(taker, 2); } @@ -143,7 +143,8 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { .withLease("5", "foo") .build(); - // In the current DynamoDBLeaseTaker implementation getAllLeases() gets leases from an internal cache that is built during takeLeases() operation + // In the current DynamoDBLeaseTaker implementation getAllLeases() gets leases from an internal cache that is + // built during takeLeases() operation assertThat(taker.allLeases().size(), equalTo(0)); taker.takeLeases(); @@ -153,7 +154,6 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { assertThat(addedLeases.values().containsAll(allLeases), equalTo(true)); } - /** * Sets the leaseDurationMillis to 0, ensuring a get request to update the existing lease after computing * leases to take @@ -161,10 +161,7 @@ public class 
DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { @Test public void testSlowGetAllLeases() throws LeasingException { long leaseDurationMillis = 0; - taker = new DynamoDBLeaseTaker(leaseRefresher, - "foo", - leaseDurationMillis, - new NullMetricsFactory()); + taker = new DynamoDBLeaseTaker(leaseRefresher, "foo", leaseDurationMillis, new NullMetricsFactory()); TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); Map addedLeases = builder.withLease("1", "bar") @@ -203,7 +200,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { /** * Verify that one activity is stolen from the highest loaded server when a server needs more than one lease and no * expired leases are available. Setup: 4 leases, server foo holds 0, bar holds 1, baz holds 5. - * + * * Foo should steal from baz. */ @Test diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java index b6e74a6b..4e927f31 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java @@ -14,18 +14,19 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.collect.ImmutableList; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; +import com.google.common.collect.ImmutableList; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -46,19 +47,25 @@ public class DynamoDBLeaseTakerTest { private static final String WORKER_IDENTIFIER = "foo"; private static final long LEASE_DURATION_MILLIS = 1000L; + private static final int DEFAULT_VERY_OLD_LEASE_DURATION_MULTIPLIER = 3; + private static final int VERY_OLD_LEASE_DURATION_MULTIPLIER = 5; + private static final long MOCK_CURRENT_TIME = 10000000000L; private DynamoDBLeaseTaker dynamoDBLeaseTaker; @Mock private LeaseRefresher leaseRefresher; + @Mock private MetricsFactory metricsFactory; + @Mock private Callable timeProvider; @Before public void setup() { - this.dynamoDBLeaseTaker = new DynamoDBLeaseTaker(leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory); + this.dynamoDBLeaseTaker = + new DynamoDBLeaseTaker(leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory); } /** @@ -67,10 +74,10 @@ public class DynamoDBLeaseTakerTest { @Test public final void testStringJoin() { List strings = new ArrayList<>(); - + strings.add("foo"); Assert.assertEquals("foo", DynamoDBLeaseTaker.stringJoin(strings, ", ")); - + strings.add("bar"); Assert.assertEquals("foo, bar", DynamoDBLeaseTaker.stringJoin(strings, ", ")); } @@ -88,7 +95,7 @@ public class DynamoDBLeaseTakerTest { when(leaseRefresher.listLeases()).thenReturn(leases); when(metricsFactory.createMetrics()).thenReturn(new NullMetricsScope()); - when(timeProvider.call()).thenReturn(1000L); + when(timeProvider.call()).thenReturn(MOCK_CURRENT_TIME); final Map actualOutput = dynamoDBLeaseTaker.computeLeaseCounts(ImmutableList.of()); @@ -112,7 +119,7 @@ public class DynamoDBLeaseTakerTest { 
when(leaseRefresher.listLeases()).thenReturn(leases); when(metricsFactory.createMetrics()).thenReturn(new NullMetricsScope()); - when(timeProvider.call()).thenReturn(1000L); + when(timeProvider.call()).thenReturn(MOCK_CURRENT_TIME); final Map actualOutput = dynamoDBLeaseTaker.computeLeaseCounts(leases); @@ -121,6 +128,63 @@ public class DynamoDBLeaseTakerTest { assertEquals(expectedOutput, actualOutput); } + @Test + public void test_veryOldLeaseDurationNanosMultiplierGetsCorrectLeases() throws Exception { + long veryOldThreshold = MOCK_CURRENT_TIME + - (TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS) * VERY_OLD_LEASE_DURATION_MULTIPLIER); + DynamoDBLeaseTaker dynamoDBLeaseTakerWithCustomMultiplier = new DynamoDBLeaseTaker( + leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory) + .withVeryOldLeaseDurationNanosMultiplier(VERY_OLD_LEASE_DURATION_MULTIPLIER); + final List allLeases = new ImmutableList.Builder() + .add(createLease("foo", "2", MOCK_CURRENT_TIME)) + .add(createLease("bar", "3", veryOldThreshold - 1)) + .add(createLease("baz", "4", veryOldThreshold)) + .build(); + final List expiredLeases = allLeases.subList(1, 3); + + dynamoDBLeaseTakerWithCustomMultiplier.allLeases.putAll( + allLeases.stream().collect(Collectors.toMap(Lease::leaseKey, Function.identity()))); + when(leaseRefresher.listLeases()).thenReturn(allLeases); + when(metricsFactory.createMetrics()).thenReturn(new NullMetricsScope()); + when(timeProvider.call()).thenReturn(MOCK_CURRENT_TIME); + + Set output = dynamoDBLeaseTakerWithCustomMultiplier.computeLeasesToTake(expiredLeases, timeProvider); + final Set expectedOutput = new HashSet<>(); + expectedOutput.add(allLeases.get(1)); + assertEquals(expectedOutput, output); + } + + @Test + public void test_disableEnablePriorityLeaseAssignmentGetsCorrectLeases() throws Exception { + long veryOldThreshold = MOCK_CURRENT_TIME + - (TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS) * DEFAULT_VERY_OLD_LEASE_DURATION_MULTIPLIER); + DynamoDBLeaseTaker dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment = new DynamoDBLeaseTaker( + leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory) + .withEnablePriorityLeaseAssignment(false); + final List allLeases = new ArrayList<>(); + allLeases.add(createLease("bar", "2", MOCK_CURRENT_TIME)); + allLeases.add(createLease("bar", "3", MOCK_CURRENT_TIME)); + allLeases.add(createLease("bar", "4", MOCK_CURRENT_TIME)); + allLeases.add(createLease("baz", "5", veryOldThreshold - 1)); + allLeases.add(createLease("baz", "6", veryOldThreshold + 1)); + allLeases.add(createLease(null, "7")); + final List expiredLeases = allLeases.subList(3, 6); + + dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment.allLeases.putAll( + allLeases.stream().collect(Collectors.toMap(Lease::leaseKey, Function.identity()))); + when(leaseRefresher.listLeases()).thenReturn(allLeases); + when(metricsFactory.createMetrics()).thenReturn(new NullMetricsScope()); + when(timeProvider.call()).thenReturn(MOCK_CURRENT_TIME); + + Set output = + dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment.computeLeasesToTake(expiredLeases, timeProvider); + final Set expectedOutput = new HashSet<>(); + expectedOutput.add(createLease("baz", "5", veryOldThreshold - 1)); + expectedOutput.add(createLease("baz", "6", veryOldThreshold + 1)); + expectedOutput.add(createLease(null, "7")); + assertEquals(expectedOutput, output); + } + private Lease createLease(String leaseOwner, String leaseKey) { final Lease lease = new Lease(); lease.checkpoint(new 
ExtendedSequenceNumber("checkpoint")); @@ -132,4 +196,17 @@ public class DynamoDBLeaseTakerTest { lease.leaseKey(leaseKey); return lease; } + + private Lease createLease(String leaseOwner, String leaseKey, long lastCounterIncrementNanos) { + final Lease lease = new Lease(); + lease.checkpoint(new ExtendedSequenceNumber("checkpoint")); + lease.ownerSwitchesSinceCheckpoint(0L); + lease.leaseCounter(0L); + lease.leaseOwner(leaseOwner); + lease.parentShardIds(Collections.singleton("parentShardId")); + lease.childShardIds(new HashSet<>()); + lease.leaseKey(leaseKey); + lease.lastCounterIncrementNanos(lastCounterIncrementNanos); + return lease; + } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java index 00db6a51..38e4f50c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java @@ -14,9 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -32,6 +29,9 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + public class TestHarnessBuilder { private long currentTimeNanos; @@ -46,7 +46,6 @@ public class TestHarnessBuilder { public Long call() throws Exception { return currentTimeNanos; } - }; public TestHarnessBuilder(final DynamoDBLeaseRefresher leaseRefresher) { @@ -96,8 +95,7 @@ public class TestHarnessBuilder { currentTimeNanos += millis * 1000000; } - public Map takeMutateAssert(DynamoDBLeaseTaker taker, int numToTake) - throws LeasingException { + public Map takeMutateAssert(DynamoDBLeaseTaker taker, int numToTake) throws LeasingException { Map result = taker.takeLeases(timeProvider); assertEquals(numToTake, result.size()); @@ -111,8 +109,7 @@ public class TestHarnessBuilder { return result; } - public Map stealMutateAssert(DynamoDBLeaseTaker taker, int numToTake) - throws LeasingException { + public Map stealMutateAssert(DynamoDBLeaseTaker taker, int numToTake) throws LeasingException { Map result = taker.takeLeases(timeProvider); assertEquals(numToTake, result.size()); @@ -120,8 +117,7 @@ public class TestHarnessBuilder { Lease original = leases.get(actual.leaseKey()); assertNotNull(original); - original.isMarkedForLeaseSteal(true) - .lastCounterIncrementNanos(actual.lastCounterIncrementNanos()); + original.isMarkedForLeaseSteal(true).lastCounterIncrementNanos(actual.lastCounterIncrementNanos()); mutateAssert(taker.getWorkerIdentifier(), original, actual); } @@ -129,7 +125,7 @@ public class TestHarnessBuilder { } public Map takeMutateAssert(DynamoDBLeaseTaker taker, String... takenShardIds) - throws LeasingException { + throws LeasingException { Map result = taker.takeLeases(timeProvider); assertEquals(takenShardIds.length, result.size()); @@ -157,7 +153,7 @@ public class TestHarnessBuilder { } public void addLeasesToRenew(LeaseRenewer renewer, String... 
shardIds) - throws DependencyException, InvalidStateException { + throws DependencyException, InvalidStateException { List leasesToRenew = new ArrayList(); for (String shardId : shardIds) { @@ -170,7 +166,7 @@ public class TestHarnessBuilder { } public Map renewMutateAssert(LeaseRenewer renewer, String... renewedShardIds) - throws DependencyException, InvalidStateException { + throws DependencyException, InvalidStateException { renewer.renewLeases(); Map heldLeases = renewer.getCurrentlyHeldLeases(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java new file mode 100644 index 00000000..f6e7ba7e --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java @@ -0,0 +1,44 @@ +package software.amazon.kinesis.lifecycle; + +import org.junit.Test; +import software.amazon.kinesis.application.TestConsumer; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.ReleaseCanaryPollingH1TestConfig; +import software.amazon.kinesis.config.ReleaseCanaryPollingH2TestConfig; +import software.amazon.kinesis.config.ReleaseCanaryStreamingTestConfig; + +public class BasicStreamConsumerIntegrationTest { + + /** + * Test with a polling consumer using the HTTP2 protocol. + * In the polling case, the consumer calls the producer each time it needs records to process. + */ + @Test + public void kclReleaseCanaryPollingH2Test() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryPollingH2TestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } + + /** + * Test with a polling consumer using the HTTP1 protocol. + * In the polling case, the consumer calls the producer each time it needs records to process. + */ + @Test + public void kclReleaseCanaryPollingH1Test() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryPollingH1TestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } + + /** + * Test with a streaming consumer. + * In the streaming configuration, a connection is established once between the consumer and the producer, and the producer continuously sends data to be processed. 
+ */ + @Test + public void kclReleaseCanaryStreamingTest() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryStreamingTestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java index 06a72230..3d82acc3 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java @@ -14,19 +14,11 @@ */ package software.amazon.kinesis.lifecycle; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.util.ArrayList; import java.util.List; -import java.util.Optional; import org.junit.Before; import org.junit.Test; - import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.ShardInfo; @@ -35,6 +27,12 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** * */ @@ -43,7 +41,7 @@ public class BlockOnParentShardTaskTest { private final String shardId = "shardId-97"; private final String streamId = "123:stream:146"; private final String concurrencyToken = "testToken"; - private final List emptyParentShardIds = new ArrayList(); + private final List emptyParentShardIds = new ArrayList<>(); private ShardInfo shardInfo; @Before @@ -59,7 +57,7 @@ public class BlockOnParentShardTaskTest { */ @Test public final void testCallNoParents() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); when(leaseRefresher.getLease(shardId)).thenReturn(null); @@ -76,8 +74,7 @@ public class BlockOnParentShardTaskTest { */ @Test public final void testCallShouldNotThrowBlockedOnParentWhenParentsHaveFinished() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - + throws DependencyException, InvalidStateException, ProvisionedThroughputException { ShardInfo shardInfo = null; BlockOnParentShardTask task = null; String parent1ShardId = "shardId-1"; @@ -118,7 +115,6 @@ public class BlockOnParentShardTaskTest { @Test public final void testCallShouldNotThrowBlockedOnParentWhenParentsHaveFinishedMultiStream() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - ShardInfo shardInfo = null; BlockOnParentShardTask task = null; String parent1LeaseKey = streamId + ":" + "shardId-1"; @@ -139,15 +135,16 @@ public class BlockOnParentShardTaskTest { // test single parent parentShardIds.add(parent1ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, - streamId); + shardInfo = + new 
ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNull(result.getException()); // test two parents parentShardIds.add(parent2ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); + shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNull(result.getException()); @@ -162,7 +159,6 @@ public class BlockOnParentShardTaskTest { @Test public final void testCallWhenParentsHaveNotFinished() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - ShardInfo shardInfo = null; BlockOnParentShardTask task = null; String parent1ShardId = "shardId-1"; @@ -226,14 +222,16 @@ public class BlockOnParentShardTaskTest { // test single parent parentShardIds.add(parent1ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); + shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNotNull(result.getException()); // test two parents parentShardIds.add(parent2ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); + shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNotNull(result.getException()); @@ -247,13 +245,14 @@ public class BlockOnParentShardTaskTest { */ @Test public final void testCallBeforeAndAfterAParentFinishes() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { BlockOnParentShardTask task = null; String parentShardId = "shardId-1"; List parentShardIds = new ArrayList<>(); parentShardIds.add(parentShardId); - ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + ShardInfo shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); TaskResult result = null; Lease parentLease = new Lease(); LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); @@ -280,5 +279,4 @@ public class BlockOnParentShardTaskTest { BlockOnParentShardTask task = new BlockOnParentShardTask(shardInfo, null, backoffTimeInMillis); assertEquals(TaskType.BLOCK_ON_PARENT_SHARDS, task.taskType()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java index f94d82fd..9c9f1930 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java @@ -14,16 +14,6 @@ */ package software.amazon.kinesis.lifecycle; -import static org.hamcrest.CoreMatchers.equalTo; -import static 
org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; - import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; @@ -39,19 +29,17 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.leases.HierarchicalShardSyncer; import software.amazon.kinesis.leases.LeaseCleanupManager; import software.amazon.kinesis.leases.LeaseCoordinator; import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.ShardDetector; import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; import software.amazon.kinesis.leases.ShardObjectHelper; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.metrics.MetricsFactory; @@ -62,47 +50,64 @@ import software.amazon.kinesis.retrieval.AggregatorUtil; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; + @RunWith(MockitoJUnitRunner.class) public class ConsumerStatesTest { private static final String STREAM_NAME = "TestStream"; - private static final InitialPositionInStreamExtended INITIAL_POSITION_IN_STREAM = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_IN_STREAM = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); private ShardConsumer consumer; private ShardConsumerArgument argument; @Mock private ShardRecordProcessor shardRecordProcessor; + @Mock private ShardRecordProcessorCheckpointer recordProcessorCheckpointer; + @Mock private ExecutorService executorService; + @Mock private ShardInfo shardInfo; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private LeaseRefresher leaseRefresher; + @Mock private Checkpointer checkpointer; + @Mock private ShutdownNotification shutdownNotification; - @Mock - private InitialPositionInStreamExtended initialPositionInStream; + @Mock private RecordsPublisher recordsPublisher; - @Mock - private KinesisAsyncClient kinesisClient; + @Mock private ShardDetector shardDetector; + @Mock private HierarchicalShardSyncer hierarchicalShardSyncer; + @Mock private MetricsFactory metricsFactory; + @Mock private ProcessRecordsInput processRecordsInput; + @Mock private TaskExecutionListener taskExecutionListener; + 
@Mock private LeaseCleanupManager leaseCleanupManager; @@ -121,20 +126,47 @@ public class ConsumerStatesTest { @Before public void setup() { - argument = new ShardConsumerArgument(shardInfo, StreamIdentifier.singleStreamInstance(STREAM_NAME), leaseCoordinator, executorService, recordsPublisher, - shardRecordProcessor, checkpointer, recordProcessorCheckpointer, parentShardPollIntervalMillis, - taskBackoffTimeMillis, skipShardSyncAtWorkerInitializationIfLeasesExist, listShardsBackoffTimeInMillis, - maxListShardsRetryAttempts, shouldCallProcessRecordsEvenForEmptyRecordList, idleTimeInMillis, - INITIAL_POSITION_IN_STREAM, cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, shardDetector, - new AggregatorUtil(), hierarchicalShardSyncer, metricsFactory, leaseCleanupManager, schemaRegistryDecoder); + argument = new ShardConsumerArgument( + shardInfo, + StreamIdentifier.singleStreamInstance(STREAM_NAME), + leaseCoordinator, + executorService, + recordsPublisher, + shardRecordProcessor, + checkpointer, + recordProcessorCheckpointer, + parentShardPollIntervalMillis, + taskBackoffTimeMillis, + skipShardSyncAtWorkerInitializationIfLeasesExist, + listShardsBackoffTimeInMillis, + maxListShardsRetryAttempts, + shouldCallProcessRecordsEvenForEmptyRecordList, + idleTimeInMillis, + INITIAL_POSITION_IN_STREAM, + cleanupLeasesOfCompletedShards, + ignoreUnexpectedChildShards, + shardDetector, + new AggregatorUtil(), + hierarchicalShardSyncer, + metricsFactory, + leaseCleanupManager, + schemaRegistryDecoder); when(shardInfo.shardId()).thenReturn("shardId-000000000000"); - when(shardInfo.streamIdentifierSerOpt()).thenReturn(Optional.of(StreamIdentifier.singleStreamInstance(STREAM_NAME).serialize())); - consumer = spy(new ShardConsumer(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, - argument, taskExecutionListener, 0)); + when(shardInfo.streamIdentifierSerOpt()) + .thenReturn(Optional.of( + StreamIdentifier.singleStreamInstance(STREAM_NAME).serialize())); + consumer = spy(new ShardConsumer( + recordsPublisher, + executorService, + shardInfo, + logWarningForTaskAfterMillis, + argument, + taskExecutionListener, + 0)); when(recordProcessorCheckpointer.checkpointer()).thenReturn(checkpointer); } - private static final Class LEASE_REFRESHER_CLASS = (Class) (Class) LeaseRefresher.class; + private static final Class LEASE_REFRESHER_CLASS = LeaseRefresher.class; @Test public void blockOnParentStateTest() { @@ -144,20 +176,30 @@ public class ConsumerStatesTest { ConsumerTask task = state.createTask(argument, consumer, null); assertThat(task, taskWith(BlockOnParentShardTask.class, ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, taskWith(BlockOnParentShardTask.class, LEASE_REFRESHER_CLASS, "leaseRefresher", - equalTo(leaseRefresher))); - assertThat(task, taskWith(BlockOnParentShardTask.class, Long.class, "parentShardPollIntervalMillis", - equalTo(parentShardPollIntervalMillis))); + assertThat( + task, + taskWith( + BlockOnParentShardTask.class, + LEASE_REFRESHER_CLASS, + "leaseRefresher", + equalTo(leaseRefresher))); + assertThat( + task, + taskWith( + BlockOnParentShardTask.class, + Long.class, + "parentShardPollIntervalMillis", + equalTo(parentShardPollIntervalMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.INITIALIZING.consumerState())); for (ShutdownReason shutdownReason : ShutdownReason.values()) { - assertThat(state.shutdownTransition(shutdownReason), + assertThat( + state.shutdownTransition(shutdownReason), 
equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); } assertThat(state.state(), equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)); assertThat(state.taskType(), equalTo(TaskType.BLOCK_ON_PARENT_SHARDS)); - } @Test @@ -168,17 +210,24 @@ public class ConsumerStatesTest { assertThat(task, initTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, initTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); assertThat(task, initTask(Checkpointer.class, "checkpoint", equalTo(checkpointer))); - assertThat(task, initTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + initTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, initTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.INITIALIZING)); @@ -193,22 +242,28 @@ public class ConsumerStatesTest { assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + procTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - } @Test @@ -219,22 +274,28 @@ public class ConsumerStatesTest { assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + procTask( 
+ ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - } @Test @@ -245,17 +306,24 @@ public class ConsumerStatesTest { assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + procTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); @@ -269,24 +337,32 @@ public class ConsumerStatesTest { consumer.gracefulShutdown(shutdownNotification); ConsumerTask task = state.createTask(argument, consumer, null); - assertThat(task, + assertThat( + task, shutdownReqTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, shutdownReqTask(RecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, + assertThat( + task, + shutdownReqTask( + RecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); + assertThat( + task, shutdownReqTask(ShutdownNotification.class, "shutdownNotification", equalTo(shutdownNotification))); assertThat(state.successTransition(), equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + 
state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); - } @Test @@ -298,14 +374,15 @@ public class ConsumerStatesTest { assertThat(state.successTransition(), equalTo(state)); assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(state)); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); - } @Test @@ -316,28 +393,33 @@ public class ConsumerStatesTest { List parentShards = new ArrayList<>(); parentShards.add("shardId-000000000000"); ChildShard leftChild = ChildShard.builder() - .shardId("shardId-000000000001") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); + .shardId("shardId-000000000001") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + .build(); ChildShard rightChild = ChildShard.builder() - .shardId("shardId-000000000002") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); + .shardId("shardId-000000000002") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); childShards.add(leftChild); childShards.add(rightChild); when(processRecordsInput.childShards()).thenReturn(childShards); ConsumerTask task = state.createTask(argument, consumer, processRecordsInput); assertThat(task, shutdownTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, - shutdownTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, shutdownTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, shutdownTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); + assertThat( + task, + shutdownTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, shutdownTask(ShutdownReason.class, "reason", equalTo(reason))); assertThat(task, shutdownTask(LeaseCoordinator.class, "leaseCoordinator", equalTo(leaseCoordinator))); - assertThat(task, + assertThat( + task, shutdownTask(Boolean.class, "cleanupLeasesOfCompletedShards", equalTo(cleanupLeasesOfCompletedShards))); assertThat(task, shutdownTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); @@ -349,7 +431,6 @@ public class ConsumerStatesTest { assertThat(state.state(), equalTo(ShardConsumerState.SHUTTING_DOWN)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN)); - } @Test @@ -359,31 +440,19 @@ public class ConsumerStatesTest { ConsumerState state = ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); 
assertThat(state.createTask(argument, consumer, null), nullValue()); - verify(consumer, times(2)).shutdownNotification(); - verify(shutdownNotification).shutdownComplete(); assertThat(state.successTransition(), equalTo(state)); for (ShutdownReason reason : ShutdownReason.values()) { assertThat(state.shutdownTransition(reason), equalTo(state)); } + assertThat(state.isTerminal(), equalTo(true)); assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_COMPLETE)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_COMPLETE)); } - @Test - public void shutdownCompleteStateNullNotificationTest() { - ConsumerState state = ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); - - when(consumer.shutdownNotification()).thenReturn(null); - assertThat(state.createTask(argument, consumer, null), nullValue()); - - verify(consumer).shutdownNotification(); - verify(shutdownNotification, never()).shutdownComplete(); - } - - static ReflectionPropertyMatcher shutdownTask(Class valueTypeClass, - String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher shutdownTask( + Class valueTypeClass, String propertyName, Matcher matcher) { return taskWith(ShutdownTask.class, valueTypeClass, propertyName, matcher); } @@ -392,18 +461,21 @@ public class ConsumerStatesTest { return taskWith(ShutdownNotificationTask.class, valueTypeClass, propertyName, matcher); } - static ReflectionPropertyMatcher procTask(Class valueTypeClass, - String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher procTask( + Class valueTypeClass, String propertyName, Matcher matcher) { return taskWith(ProcessTask.class, valueTypeClass, propertyName, matcher); } - static ReflectionPropertyMatcher initTask(Class valueTypeClass, - String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher initTask( + Class valueTypeClass, String propertyName, Matcher matcher) { return taskWith(InitializeTask.class, valueTypeClass, propertyName, matcher); } - static ReflectionPropertyMatcher taskWith(Class taskTypeClass, - Class valueTypeClass, String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher taskWith( + Class taskTypeClass, + Class valueTypeClass, + String propertyName, + Matcher matcher) { return new ReflectionPropertyMatcher<>(taskTypeClass, valueTypeClass, matcher, propertyName); } @@ -416,8 +488,11 @@ public class ConsumerStatesTest { private final String propertyName; private final Field matchingField; - private ReflectionPropertyMatcher(Class taskTypeClass, Class valueTypeClass, - Matcher matcher, String propertyName) { + private ReflectionPropertyMatcher( + Class taskTypeClass, + Class valueTypeClass, + Matcher matcher, + String propertyName) { this.taskTypeClass = taskTypeClass; this.valueTypeClazz = valueTypeClass; this.matcher = matcher; @@ -431,50 +506,59 @@ public class ConsumerStatesTest { } } this.matchingField = matching; - } @Override protected boolean matchesSafely(ConsumerTask item, Description mismatchDescription) { - return Condition.matched(item, mismatchDescription).and(new Condition.Step() { - @Override - public Condition apply(ConsumerTask value, Description mismatch) { - if (taskTypeClass.equals(value.getClass())) { - return Condition.matched(taskTypeClass.cast(value), mismatch); - } - mismatch.appendText("Expected task type of ").appendText(taskTypeClass.getName()) - .appendText(" but was ").appendText(value.getClass().getName()); - return Condition.notMatched(); - } - }).and(new Condition.Step() { - @Override - public Condition apply(TaskType value, 
Description mismatch) { - if (matchingField == null) { - mismatch.appendText("Field ").appendText(propertyName).appendText(" not present in ") - .appendText(taskTypeClass.getName()); - return Condition.notMatched(); - } + return Condition.matched(item, mismatchDescription) + .and(new Condition.Step() { + @Override + public Condition apply(ConsumerTask value, Description mismatch) { + if (taskTypeClass.equals(value.getClass())) { + return Condition.matched(taskTypeClass.cast(value), mismatch); + } + mismatch.appendText("Expected task type of ") + .appendText(taskTypeClass.getName()) + .appendText(" but was ") + .appendText(value.getClass().getName()); + return Condition.notMatched(); + } + }) + .and(new Condition.Step() { + @Override + public Condition apply(TaskType value, Description mismatch) { + if (matchingField == null) { + mismatch.appendText("Field ") + .appendText(propertyName) + .appendText(" not present in ") + .appendText(taskTypeClass.getName()); + return Condition.notMatched(); + } - try { - return Condition.matched(getValue(value), mismatch); - } catch (RuntimeException re) { - mismatch.appendText("Failure while retrieving value for ").appendText(propertyName); - return Condition.notMatched(); - } - - } - }).and(new Condition.Step() { - @Override - public Condition apply(Object value, Description mismatch) { - if (value != null && !valueTypeClazz.isAssignableFrom(value.getClass())) { - mismatch.appendText("Expected a value of type ").appendText(valueTypeClazz.getName()) - .appendText(" but was ").appendText(value.getClass().getName()); - return Condition.notMatched(); - } - return Condition.matched(valueTypeClazz.cast(value), mismatch); - } - }).matching(matcher); + try { + return Condition.matched(getValue(value), mismatch); + } catch (RuntimeException re) { + mismatch.appendText("Failure while retrieving value for ") + .appendText(propertyName); + return Condition.notMatched(); + } + } + }) + .and(new Condition.Step() { + @Override + public Condition apply(Object value, Description mismatch) { + if (value != null && !valueTypeClazz.isAssignableFrom(value.getClass())) { + mismatch.appendText("Expected a value of type ") + .appendText(valueTypeClazz.getName()) + .appendText(" but was ") + .appendText(value.getClass().getName()); + return Condition.notMatched(); + } + return Condition.matched(valueTypeClazz.cast(value), mismatch); + } + }) + .matching(matcher); } @Override @@ -495,5 +579,4 @@ public class ConsumerStatesTest { } } } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java new file mode 100644 index 00000000..d9d1371c --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java @@ -0,0 +1,45 @@ +package software.amazon.kinesis.lifecycle; + +import org.junit.Test; +import software.amazon.kinesis.application.TestConsumer; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig; +import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig; +import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountPollingH2TestConfig; +import 
software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountStreamingTestConfig; + +public class CrossAccountStreamConsumerIntegrationTest { + + /** + * Test with a cross-account polling consumer using the HTTP2 protocol. + * In the polling case, the consumer makes calls to the producer each time to request records to process. + * The stream is in a different account than the Kinesis client used to get records. + */ + @Test + public void kclReleaseCanaryCrossAccountPollingH2Test() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryCrossAccountPollingH2TestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } + + @Test + public void kclReleaseCanaryCrossAccountStreamingTest() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryCrossAccountStreamingTestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } + + @Test + public void kclReleaseCanaryCrossAccountMultiStreamStreamingTest() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } + + @Test + public void kclReleaseCanaryCrossAccountMultiStreamPollingH2Test() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java new file mode 100644 index 00000000..cbccf1f2 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java @@ -0,0 +1,23 @@ +package software.amazon.kinesis.lifecycle; + +import org.junit.Test; +import software.amazon.kinesis.application.TestConsumer; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.ReleaseCanaryMultiStreamPollingH2TestConfig; +import software.amazon.kinesis.config.multistream.ReleaseCanaryMultiStreamStreamingTestConfig; + +public class MultiStreamConsumerIntegrationTest { + @Test + public void kclReleaseCanaryMultiStreamPollingTest() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryMultiStreamPollingH2TestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } + + @Test + public void kclReleaseCanaryMultiStreamStreamingTest() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryMultiStreamStreamingTestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java index 12476837..300ad832 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java @@ -14,24 +14,6 @@ */ package software.amazon.kinesis.lifecycle; -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static
org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.beans.HasPropertyWithValue.hasProperty; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.io.ByteArrayOutputStream; import java.math.BigInteger; import java.nio.ByteBuffer; @@ -52,6 +34,9 @@ import java.util.concurrent.TimeUnit; import com.amazonaws.services.schemaregistry.common.Schema; import com.amazonaws.services.schemaregistry.deserializers.GlueSchemaRegistryDeserializer; import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import lombok.Data; +import lombok.Getter; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; @@ -61,11 +46,6 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - -import com.google.protobuf.ByteString; - -import lombok.Data; -import lombok.Getter; import software.amazon.awssdk.services.kinesis.model.HashKeyRange; import software.amazon.awssdk.services.kinesis.model.Shard; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; @@ -83,6 +63,24 @@ import software.amazon.kinesis.retrieval.kpl.Messages; import software.amazon.kinesis.retrieval.kpl.Messages.AggregatedRecord; import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder; +import static org.hamcrest.CoreMatchers.allOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.beans.HasPropertyWithValue.hasProperty; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class ProcessTaskTest { private static final long IDLE_TIME_IN_MILLISECONDS = 100L; @@ -95,28 +93,29 @@ public class ProcessTaskTest { @Mock private ProcessRecordsInput processRecordsInput; + @Mock private ShardDetector shardDetector; @Mock private GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer; - - private static final byte[] TEST_DATA = new byte[] { 1, 2, 3, 4 }; + private static final byte[] TEST_DATA = new byte[] {1, 2, 3, 4}; private final String shardId = "shard-test"; private final long taskBackoffTimeMillis = 1L; @Mock private ShardRecordProcessor shardRecordProcessor; + @Mock private ShardRecordProcessorCheckpointer checkpointer; + @Mock private ThrottlingReporter throttlingReporter; private ProcessTask processTask; - @Before public void setUpProcessTask() { when(checkpointer.checkpointer()).thenReturn(mock(Checkpointer.class)); @@ -125,35 +124,47 @@ public class 
ProcessTaskTest { } private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput) { - return makeProcessTask(processRecordsInput, new AggregatorUtil(), - skipShardSyncAtWorkerInitializationIfLeasesExist); + return makeProcessTask( + processRecordsInput, new AggregatorUtil(), skipShardSyncAtWorkerInitializationIfLeasesExist); } - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, GlueSchemaRegistryDeserializer deserializer) { - return makeProcessTask(processRecordsInput, new AggregatorUtil(), skipShardSyncAtWorkerInitializationIfLeasesExist, new SchemaRegistryDecoder(deserializer)); + private ProcessTask makeProcessTask( + ProcessRecordsInput processRecordsInput, GlueSchemaRegistryDeserializer deserializer) { + return makeProcessTask( + processRecordsInput, + new AggregatorUtil(), + skipShardSyncAtWorkerInitializationIfLeasesExist, + new SchemaRegistryDecoder(deserializer)); } - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, - boolean skipShardSync) { + private ProcessTask makeProcessTask( + ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, boolean skipShardSync) { return makeProcessTask(processRecordsInput, aggregatorUtil, skipShardSync, null); } - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, boolean skipShardSync, - SchemaRegistryDecoder schemaRegistryDecoder) { - return new ProcessTask(shardInfo, shardRecordProcessor, checkpointer, taskBackoffTimeMillis, - skipShardSync, shardDetector, throttlingReporter, - processRecordsInput, shouldCallProcessRecordsEvenForEmptyRecordList, IDLE_TIME_IN_MILLISECONDS, - aggregatorUtil, - new NullMetricsFactory(), - schemaRegistryDecoder - ); + private ProcessTask makeProcessTask( + ProcessRecordsInput processRecordsInput, + AggregatorUtil aggregatorUtil, + boolean skipShardSync, + SchemaRegistryDecoder schemaRegistryDecoder) { + return new ProcessTask( + shardInfo, + shardRecordProcessor, + checkpointer, + taskBackoffTimeMillis, + skipShardSync, + shardDetector, + throttlingReporter, + processRecordsInput, + shouldCallProcessRecordsEvenForEmptyRecordList, + IDLE_TIME_IN_MILLISECONDS, + aggregatorUtil, + new NullMetricsFactory(), + schemaRegistryDecoder); } - - @Test public void testProcessTaskWithShardEndReached() { - processTask = makeProcessTask(processRecordsInput); when(processRecordsInput.isAtShardEnd()).thenReturn(true); @@ -162,13 +173,23 @@ public class ProcessTaskTest { } private KinesisClientRecord makeKinesisClientRecord(String partitionKey, String sequenceNumber, Instant arrival) { - return KinesisClientRecord.builder().partitionKey(partitionKey).sequenceNumber(sequenceNumber) - .approximateArrivalTimestamp(arrival).data(ByteBuffer.wrap(TEST_DATA)).build(); + return KinesisClientRecord.builder() + .partitionKey(partitionKey) + .sequenceNumber(sequenceNumber) + .approximateArrivalTimestamp(arrival) + .data(ByteBuffer.wrap(TEST_DATA)) + .build(); } - private KinesisClientRecord makeKinesisClientRecord(String partitionKey, String sequenceNumber, Instant arrival, ByteBuffer data, Schema schema) { - return KinesisClientRecord.builder().partitionKey(partitionKey).sequenceNumber(sequenceNumber) - .approximateArrivalTimestamp(arrival).data(data).schema(schema).build(); + private KinesisClientRecord makeKinesisClientRecord( + String partitionKey, String sequenceNumber, Instant arrival, ByteBuffer data, Schema schema) { + return KinesisClientRecord.builder() + 
.partitionKey(partitionKey) + .sequenceNumber(sequenceNumber) + .approximateArrivalTimestamp(arrival) + .data(data) + .schema(schema) + .build(); } @Test @@ -203,13 +224,18 @@ public class ProcessTaskTest { final String sqn = new BigInteger(128, new Random()).toString(); final String pk = UUID.randomUUID().toString(); final Instant ts = Instant.now().minus(4, ChronoUnit.HOURS); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(sqn).approximateArrivalTimestamp(ts).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey("-") + .data(generateAggregatedRecord(pk)) + .sequenceNumber(sqn) + .approximateArrivalTimestamp(ts) + .build(); processTask = makeProcessTask(processRecordsInput); ShardRecordProcessorOutcome outcome = testWithRecord(record); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(3, actualRecords.size()); for (KinesisClientRecord pr : actualRecords) { @@ -231,13 +257,17 @@ public class ProcessTaskTest { final String sqn = new BigInteger(128, new Random()).toString(); final String pk = UUID.randomUUID().toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(sqn).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey("-") + .data(generateAggregatedRecord(pk)) + .sequenceNumber(sqn) + .build(); processTask = makeProcessTask(processRecordsInput); ShardRecordProcessorOutcome outcome = testWithRecord(record); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(3, actualRecords.size()); for (KinesisClientRecord actualRecord : actualRecords) { @@ -255,11 +285,12 @@ public class ProcessTaskTest { final int numberOfRecords = 104; // Start this batch of records at a sequence number greater than the previous checkpoint value. final BigInteger startingSqn = previousCheckpointSqn.add(BigInteger.valueOf(10)); - final List records = generateConsecutiveRecords(numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), - new Date(), startingSqn); + final List records = + generateConsecutiveRecords(numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), new Date(), startingSqn); processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(records, + ShardRecordProcessorOutcome outcome = testWithRecords( + records, new ExtendedSequenceNumber(previousCheckpointSqn.toString()), new ExtendedSequenceNumber(previousCheckpointSqn.toString())); @@ -273,12 +304,12 @@ public class ProcessTaskTest { // Some sequence number value from previous processRecords call.
final BigInteger baseSqn = new BigInteger(128, new Random()); final ExtendedSequenceNumber lastCheckpointEspn = new ExtendedSequenceNumber(baseSqn.toString()); - final ExtendedSequenceNumber largestPermittedEsqn = new ExtendedSequenceNumber( - baseSqn.add(BigInteger.valueOf(100)).toString()); + final ExtendedSequenceNumber largestPermittedEsqn = + new ExtendedSequenceNumber(baseSqn.add(BigInteger.valueOf(100)).toString()); processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(Collections.emptyList(), lastCheckpointEspn, - largestPermittedEsqn); + ShardRecordProcessorOutcome outcome = + testWithRecords(Collections.emptyList(), lastCheckpointEspn, largestPermittedEsqn); // Make sure that even with empty records, largest permitted sequence number does not change. assertEquals(largestPermittedEsqn, outcome.getCheckpointCall()); @@ -299,15 +330,20 @@ public class ProcessTaskTest { // Values for this processRecords call. String startingSqn = previousCheckpointSqn.toString(); String pk = UUID.randomUUID().toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(startingSqn).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey("-") + .data(generateAggregatedRecord(pk)) + .sequenceNumber(startingSqn) + .build(); processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(Collections.singletonList(record), + ShardRecordProcessorOutcome outcome = testWithRecords( + Collections.singletonList(record), new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn), new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn)); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); // First two records should be dropped - and only 1 remaining records should be there. assertThat(actualRecords.size(), equalTo(1)); @@ -320,8 +356,8 @@ public class ProcessTaskTest { assertThat(actualRecord.approximateArrivalTimestamp(), nullValue()); // Expected largest permitted sequence number will be last sub-record sequence number. 
- final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber( - previousCheckpointSqn.toString(), 2L); + final ExtendedSequenceNumber expectedLargestPermittedEsqn = + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 2L); assertEquals(expectedLargestPermittedEsqn, outcome.getCheckpointCall()); } @@ -338,48 +374,58 @@ public class ProcessTaskTest { int recordIndex = 0; sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); + KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord( + sequenceNumber, aggregatedRecord, recordIndex, approximateArrivalTime); aggregatorUtil.addInRange(expectedRecord); recordIndex++; } sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); + KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord( + sequenceNumber, aggregatedRecord, recordIndex, approximateArrivalTime); aggregatorUtil.addBelowRange(expectedRecord); recordIndex++; } sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); + KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord( + sequenceNumber, aggregatedRecord, recordIndex, approximateArrivalTime); aggregatorUtil.addAboveRange(expectedRecord); recordIndex++; } byte[] payload = aggregatedRecord.build().toByteArray(); ByteArrayOutputStream bos = new ByteArrayOutputStream(); - bos.write(new byte[] { -13, -119, -102, -62 }); + bos.write(new byte[] {-13, -119, -102, -62}); bos.write(payload); bos.write(md5(payload)); ByteBuffer rawRecordData = ByteBuffer.wrap(bos.toByteArray()); - KinesisClientRecord rawRecord = KinesisClientRecord.builder().data(rawRecordData) - .approximateArrivalTimestamp(approximateArrivalTime).partitionKey("p-01") - .sequenceNumber(sequenceNumber.toString()).build(); + KinesisClientRecord rawRecord = KinesisClientRecord.builder() + .data(rawRecordData) + .approximateArrivalTimestamp(approximateArrivalTime) + .partitionKey("p-01") + .sequenceNumber(sequenceNumber.toString()) + .build(); - when(shardDetector.shard(any())).thenReturn(Shard.builder().shardId("Shard-01") - .hashKeyRange(HashKeyRange.builder().startingHashKey(lowHashKey).endingHashKey(highHashKey).build()) - .build()); + when(shardDetector.shard(any())) + .thenReturn(Shard.builder() + .shardId("Shard-01") + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(lowHashKey) + .endingHashKey(highHashKey) + .build()) + .build()); when(processRecordsInput.records()).thenReturn(Collections.singletonList(rawRecord)); ProcessTask processTask = makeProcessTask(processRecordsInput, aggregatorUtil, false); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, - new ExtendedSequenceNumber(sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber( + sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), new ExtendedSequenceNumber(sequenceNumber.toString(), recordIndex + 1L)); assertThat(outcome.processRecordsCall.records().size(), equalTo(0)); @@ 
-402,8 +448,8 @@ public class ProcessTaskTest { Instant approximateArrivalTime = Instant.now().minus(i + 4, ChronoUnit.SECONDS); sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int j = 0; j < 2; ++j) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - j, approximateArrivalTime); + KinesisClientRecord expectedRecord = + createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, j, approximateArrivalTime); aggregatorUtil.addInRange(expectedRecord); expectedRecords.add(expectedRecord); } @@ -416,20 +462,31 @@ public class ProcessTaskTest { ByteBuffer rawRecordData = ByteBuffer.wrap(bos.toByteArray()); - KinesisClientRecord rawRecord = KinesisClientRecord.builder().data(rawRecordData) - .approximateArrivalTimestamp(approximateArrivalTime).partitionKey("pa-" + i) - .sequenceNumber(sequenceNumber.toString()).build(); + KinesisClientRecord rawRecord = KinesisClientRecord.builder() + .data(rawRecordData) + .approximateArrivalTimestamp(approximateArrivalTime) + .partitionKey("pa-" + i) + .sequenceNumber(sequenceNumber.toString()) + .build(); rawRecords.add(rawRecord); } - when(shardDetector.shard(any())).thenReturn(Shard.builder().shardId("Shard-01") - .hashKeyRange(HashKeyRange.builder().startingHashKey(lowHashKey).endingHashKey(highHashKey).build()) - .build()); + when(shardDetector.shard(any())) + .thenReturn(Shard.builder() + .shardId("Shard-01") + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(lowHashKey) + .endingHashKey(highHashKey) + .build()) + .build()); when(processRecordsInput.records()).thenReturn(rawRecords); ProcessTask processTask = makeProcessTask(processRecordsInput, aggregatorUtil, false); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber( + sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), new ExtendedSequenceNumber(sequenceNumber.toString(), 0L)); assertThat(outcome.processRecordsCall.records(), equalTo(expectedRecords)); @@ -443,38 +500,32 @@ public class ProcessTaskTest { final String pk = UUID.randomUUID().toString(); final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - //Payload set to SchemaRegistry encoded data and schema to null - //to mimic Schema Registry encoded message from Kinesis stream. - final KinesisClientRecord schemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); + // Payload set to SchemaRegistry encoded data and schema to null + // to mimic Schema Registry encoded message from Kinesis stream. 
+ final KinesisClientRecord schemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); - final KinesisClientRecord nonSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); + final KinesisClientRecord nonSchemaRegistryRecord = makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); - when(processRecordsInput.records()) - .thenReturn( - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ) - ); + when(processRecordsInput.records()).thenReturn(ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord)); doReturn(true).when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); doReturn(TEST_DATA).when(glueSchemaRegistryDeserializer).getData(SCHEMA_REGISTRY_PAYLOAD); doReturn(SCHEMA_REGISTRY_SCHEMA).when(glueSchemaRegistryDeserializer).getSchema(SCHEMA_REGISTRY_PAYLOAD); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), - new ExtendedSequenceNumber(previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), + new ExtendedSequenceNumber( + previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); - KinesisClientRecord decodedSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(TEST_DATA), SCHEMA_REGISTRY_SCHEMA); + KinesisClientRecord decodedSchemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(TEST_DATA), SCHEMA_REGISTRY_SCHEMA); List expectedRecords = - ImmutableList.of( - decodedSchemaRegistryRecord, - nonSchemaRegistryRecord - ); + ImmutableList.of(decodedSchemaRegistryRecord, nonSchemaRegistryRecord); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(expectedRecords, actualRecords); @@ -491,35 +542,29 @@ public class ProcessTaskTest { final String pk = UUID.randomUUID().toString(); final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - //Payload set to SchemaRegistry encoded data and schema to null - //to mimic Schema Registry encoded message from Kinesis stream. - final KinesisClientRecord schemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); + // Payload set to SchemaRegistry encoded data and schema to null + // to mimic Schema Registry encoded message from Kinesis stream. 
+ final KinesisClientRecord schemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); - final KinesisClientRecord nonSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); + final KinesisClientRecord nonSchemaRegistryRecord = makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); - when(processRecordsInput.records()) - .thenReturn( - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ) - ); + when(processRecordsInput.records()).thenReturn(ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord)); doThrow(new RuntimeException("Invalid data")) - .when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); + .when(glueSchemaRegistryDeserializer) + .canDeserialize(SCHEMA_REGISTRY_PAYLOAD); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), - new ExtendedSequenceNumber(previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), + new ExtendedSequenceNumber( + previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); - List expectedRecords = - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ); + List expectedRecords = ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(expectedRecords, actualRecords); } @@ -532,61 +577,66 @@ public class ProcessTaskTest { final String pk = UUID.randomUUID().toString(); final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - //Payload set to SchemaRegistry encoded data and schema to null - //to mimic Schema Registry encoded message from Kinesis stream. - final KinesisClientRecord schemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); + // Payload set to SchemaRegistry encoded data and schema to null + // to mimic Schema Registry encoded message from Kinesis stream. 
+ final KinesisClientRecord schemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); - final KinesisClientRecord nonSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); + final KinesisClientRecord nonSchemaRegistryRecord = makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); - when(processRecordsInput.records()) - .thenReturn( - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ) - ); + when(processRecordsInput.records()).thenReturn(ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord)); - doReturn(true) - .when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); + doReturn(true).when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); doThrow(new RuntimeException("Cannot decode data")) - .when(glueSchemaRegistryDeserializer).getData(SCHEMA_REGISTRY_PAYLOAD); + .when(glueSchemaRegistryDeserializer) + .getData(SCHEMA_REGISTRY_PAYLOAD); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), - new ExtendedSequenceNumber(previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), + new ExtendedSequenceNumber( + previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); - List expectedRecords = - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ); + List expectedRecords = ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(expectedRecords, actualRecords); } - private KinesisClientRecord createAndRegisterAggregatedRecord(BigInteger sequenceNumber, - AggregatedRecord.Builder aggregatedRecord, int i, Instant approximateArrivalTime) { + private KinesisClientRecord createAndRegisterAggregatedRecord( + BigInteger sequenceNumber, + AggregatedRecord.Builder aggregatedRecord, + int i, + Instant approximateArrivalTime) { byte[] dataArray = new byte[1024]; ThreadLocalRandom.current().nextBytes(dataArray); ByteBuffer data = ByteBuffer.wrap(dataArray); - KinesisClientRecord expectedRecord = KinesisClientRecord.builder().partitionKey("p-" + i) - .sequenceNumber(sequenceNumber.toString()).approximateArrivalTimestamp(approximateArrivalTime) - .data(data).subSequenceNumber(i).aggregated(true).build(); + KinesisClientRecord expectedRecord = KinesisClientRecord.builder() + .partitionKey("p-" + i) + .sequenceNumber(sequenceNumber.toString()) + .approximateArrivalTimestamp(approximateArrivalTime) + .data(data) + .subSequenceNumber(i) + .aggregated(true) + .build(); - Messages.Record kplRecord = Messages.Record.newBuilder().setData(ByteString.copyFrom(dataArray)) - .setPartitionKeyIndex(i).build(); + Messages.Record kplRecord = Messages.Record.newBuilder() + .setData(ByteString.copyFrom(dataArray)) + .setPartitionKeyIndex(i) + .build(); aggregatedRecord.addPartitionKeyTable(expectedRecord.partitionKey()).addRecords(kplRecord); return expectedRecord; } private enum RecordRangeState { - BELOW_RANGE, IN_RANGE, ABOVE_RANGE + BELOW_RANGE, + IN_RANGE, + ABOVE_RANGE } @Getter @@ -600,7 +650,10 @@ public class ProcessTaskTest { ControlledHashAggregatorUtil(String lowHashKey, String highHashKey) { this.lowHashKey = new 
BigInteger(lowHashKey); this.highHashKey = new BigInteger(highHashKey); - this.width = this.highHashKey.subtract(this.lowHashKey).mod(BigInteger.valueOf(Long.MAX_VALUE)).longValue() + this.width = this.highHashKey + .subtract(this.lowHashKey) + .mod(BigInteger.valueOf(Long.MAX_VALUE)) + .longValue() - 1; } @@ -626,39 +679,54 @@ public class ProcessTaskTest { assertThat(rangeState, not(nullValue())); switch (rangeState) { - case BELOW_RANGE: - return lowHashKey.subtract(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()).abs()); - case IN_RANGE: - return lowHashKey.add(BigInteger.valueOf(ThreadLocalRandom.current().nextLong(width))); - case ABOVE_RANGE: - return highHashKey.add(BigInteger.ONE) - .add(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()).abs()); - default: - throw new IllegalStateException("Unknown range state: " + rangeState); + case BELOW_RANGE: + return lowHashKey.subtract( + BigInteger.valueOf(ThreadLocalRandom.current().nextInt()) + .abs()); + case IN_RANGE: + return lowHashKey.add( + BigInteger.valueOf(ThreadLocalRandom.current().nextLong(width))); + case ABOVE_RANGE: + return highHashKey + .add(BigInteger.ONE) + .add(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()) + .abs()); + default: + throw new IllegalStateException("Unknown range state: " + rangeState); } } } private ShardRecordProcessorOutcome testWithRecord(KinesisClientRecord record) { - return testWithRecords(Collections.singletonList(record), ExtendedSequenceNumber.TRIM_HORIZON, + return testWithRecords( + Collections.singletonList(record), + ExtendedSequenceNumber.TRIM_HORIZON, ExtendedSequenceNumber.TRIM_HORIZON); } - private ShardRecordProcessorOutcome testWithRecords(List records, - ExtendedSequenceNumber lastCheckpointValue, ExtendedSequenceNumber largestPermittedCheckpointValue) { + private ShardRecordProcessorOutcome testWithRecords( + List records, + ExtendedSequenceNumber lastCheckpointValue, + ExtendedSequenceNumber largestPermittedCheckpointValue) { return testWithRecords(records, lastCheckpointValue, largestPermittedCheckpointValue, new AggregatorUtil()); } - private ShardRecordProcessorOutcome testWithRecords(List records, ExtendedSequenceNumber lastCheckpointValue, - ExtendedSequenceNumber largestPermittedCheckpointValue, AggregatorUtil aggregatorUtil) { + private ShardRecordProcessorOutcome testWithRecords( + List records, + ExtendedSequenceNumber lastCheckpointValue, + ExtendedSequenceNumber largestPermittedCheckpointValue, + AggregatorUtil aggregatorUtil) { when(processRecordsInput.records()).thenReturn(records); return testWithRecords( makeProcessTask(processRecordsInput, aggregatorUtil, skipShardSyncAtWorkerInitializationIfLeasesExist), - lastCheckpointValue, largestPermittedCheckpointValue); + lastCheckpointValue, + largestPermittedCheckpointValue); } - private ShardRecordProcessorOutcome testWithRecords(ProcessTask processTask, ExtendedSequenceNumber lastCheckpointValue, - ExtendedSequenceNumber largestPermittedCheckpointValue) { + private ShardRecordProcessorOutcome testWithRecords( + ProcessTask processTask, + ExtendedSequenceNumber lastCheckpointValue, + ExtendedSequenceNumber largestPermittedCheckpointValue) { when(checkpointer.lastCheckpointValue()).thenReturn(lastCheckpointValue); when(checkpointer.largestPermittedCheckpointValue()).thenReturn(largestPermittedCheckpointValue); processTask.call(); @@ -671,12 +739,11 @@ public class ProcessTaskTest { verify(checkpointer).largestPermittedCheckpointValue(esnCaptor.capture()); return new 
ShardRecordProcessorOutcome(recordsCaptor.getValue(), esnCaptor.getValue()); - } /** * See the KPL documentation on GitHub for more details about the binary format. - * + * * @param pk * Partition key to use. All the records will have the same partition key. * @return ByteBuffer containing the serialized form of the aggregated record, along with the necessary header and @@ -684,13 +751,20 @@ public class ProcessTaskTest { */ private static ByteBuffer generateAggregatedRecord(String pk) { ByteBuffer bb = ByteBuffer.allocate(1024); - bb.put(new byte[] { -13, -119, -102, -62 }); + bb.put(new byte[] {-13, -119, -102, -62}); - Messages.Record r = Messages.Record.newBuilder().setData(ByteString.copyFrom(TEST_DATA)).setPartitionKeyIndex(0) + Messages.Record r = Messages.Record.newBuilder() + .setData(ByteString.copyFrom(TEST_DATA)) + .setPartitionKeyIndex(0) .build(); - byte[] payload = AggregatedRecord.newBuilder().addPartitionKeyTable(pk).addRecords(r).addRecords(r) - .addRecords(r).build().toByteArray(); + byte[] payload = AggregatedRecord.newBuilder() + .addPartitionKeyTable(pk) + .addRecords(r) + .addRecords(r) + .addRecords(r) + .build() + .toByteArray(); bb.put(payload); bb.put(md5(payload)); @@ -699,13 +773,21 @@ public class ProcessTaskTest { return bb; } - private static List generateConsecutiveRecords(int numberOfRecords, String partitionKey, ByteBuffer data, - Date arrivalTimestamp, BigInteger startSequenceNumber) { + private static List generateConsecutiveRecords( + int numberOfRecords, + String partitionKey, + ByteBuffer data, + Date arrivalTimestamp, + BigInteger startSequenceNumber) { List records = new ArrayList<>(); for (int i = 0; i < numberOfRecords; ++i) { String seqNum = startSequenceNumber.add(BigInteger.valueOf(i)).toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey(partitionKey).data(data) - .sequenceNumber(seqNum).approximateArrivalTimestamp(arrivalTimestamp.toInstant()).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey(partitionKey) + .data(data) + .sequenceNumber(seqNum) + .approximateArrivalTimestamp(arrivalTimestamp.toInstant()) + .build(); records.add(record); } return records; @@ -742,11 +824,11 @@ public class ProcessTaskTest { if (expected == null) { matchers = nullValue(TaskResult.class); } else { - matchers = allOf(notNullValue(TaskResult.class), + matchers = allOf( + notNullValue(TaskResult.class), hasProperty("shardEndReached", equalTo(expected.isShardEndReached())), hasProperty("exception", equalTo(expected.getException()))); } - } @Override diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java new file mode 100644 index 00000000..38fab2ac --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java @@ -0,0 +1,15 @@ +package software.amazon.kinesis.lifecycle; + +import org.junit.Test; +import software.amazon.kinesis.application.TestConsumer; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.ReleaseCanaryStreamingReshardingTestConfig; + +public class ReshardIntegrationTest { + @Test + public void kclReleaseCanaryStreamingReshardingTest() throws Exception { + KCLAppConfig consumerConfig = new ReleaseCanaryStreamingReshardingTestConfig(); + TestConsumer consumer = new TestConsumer(consumerConfig); + consumer.run(); + } +} diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java index 78e09fa1..440741b2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java @@ -14,6 +14,45 @@ */ package software.amazon.kinesis.lifecycle; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import lombok.NonNull; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.common.RequestDetails; +import software.amazon.kinesis.leases.ShardInfo; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.retrieval.KinesisClientRecord; +import software.amazon.kinesis.retrieval.RecordsDeliveryAck; +import software.amazon.kinesis.retrieval.RecordsPublisher; +import software.amazon.kinesis.retrieval.RecordsRetrieved; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.nullValue; import static org.junit.Assert.assertEquals; @@ -29,48 +68,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static software.amazon.kinesis.utils.ProcessRecordsInputMatcher.eqProcessRecordsInput; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; - -import org.apache.commons.lang3.StringUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import 
software.amazon.kinesis.common.RequestDetails; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RecordsDeliveryAck; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RecordsRetrieved; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - @Slf4j @RunWith(MockitoJUnitRunner.class) public class ShardConsumerSubscriberTest { @@ -79,10 +76,13 @@ public class ShardConsumerSubscriberTest { private static final String TERMINAL_MARKER = "Terminal"; + private static final long DEFAULT_NOTIFIER_TIMEOUT = 5000L; + private final RequestDetails lastSuccessfulRequestDetails = new RequestDetails(); @Mock private ShardConsumer shardConsumer; + @Mock private RecordsRetrieved recordsRetrieved; @@ -99,16 +99,22 @@ public class ShardConsumerSubscriberTest { @Before public void before() { - executorService = Executors.newFixedThreadPool(8, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 8, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); recordsPublisher = new TestPublisher(); - ShardInfo shardInfo = new ShardInfo("shard-001", "", Collections.emptyList(), - ExtendedSequenceNumber.TRIM_HORIZON); + ShardInfo shardInfo = + new ShardInfo("shard-001", "", Collections.emptyList(), ExtendedSequenceNumber.TRIM_HORIZON); when(shardConsumer.shardInfo()).thenReturn(shardInfo); - processRecordsInput = ProcessRecordsInput.builder().records(Collections.emptyList()) - .cacheEntryTime(Instant.now()).build(); + processRecordsInput = ProcessRecordsInput.builder() + .records(Collections.emptyList()) + .cacheEntryTime(Instant.now()) + .build(); subscriber = new ShardConsumerSubscriber(recordsPublisher, executorService, bufferSize, shardConsumer, 0); when(recordsRetrieved.processRecordsInput()).thenReturn(processRecordsInput); @@ -125,10 +131,7 @@ public class ShardConsumerSubscriberTest { setupNotifierAnswer(1); - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - processedNotifier.wait(5000); - } + startSubscriptionsAndWait(); verify(shardConsumer).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @@ -139,12 +142,10 @@ public class ShardConsumerSubscriberTest { setupNotifierAnswer(recordsPublisher.responses.size()); - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - processedNotifier.wait(5000); - } + startSubscriptionsAndWait(); - verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(100)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @Test @@ -154,33 +155,32 @@ public class ShardConsumerSubscriberTest { Throwable testException = new Throwable("ShardConsumerError"); doAnswer(new Answer() { - int expectedInvocations = recordsPublisher.responses.size(); + int expectedInvocations = recordsPublisher.responses.size(); - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - expectedInvocations--; - if (expectedInvocations == 10) { - throw testException; - } - if (expectedInvocations <= 0) { - synchronized (processedNotifier) { - 
processedNotifier.notifyAll(); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + expectedInvocations--; + if (expectedInvocations == 10) { + throw testException; + } + if (expectedInvocations <= 0) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; } - } - return null; - } - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - processedNotifier.wait(5000); - } + startSubscriptionsAndWait(); assertThat(subscriber.getAndResetDispatchFailure(), equalTo(testException)); assertThat(subscriber.getAndResetDispatchFailure(), nullValue()); - verify(shardConsumer, times(20)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); - + verify(shardConsumer, times(20)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @Test @@ -192,10 +192,7 @@ public class ShardConsumerSubscriberTest { setupNotifierAnswer(10); - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - processedNotifier.wait(5000); - } + startSubscriptionsAndWait(); for (int attempts = 0; attempts < 10; attempts++) { if (subscriber.retrievalFailure() != null) { @@ -204,7 +201,8 @@ public class ShardConsumerSubscriberTest { Thread.sleep(10); } - verify(shardConsumer, times(10)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(10)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); assertThat(subscriber.retrievalFailure(), equalTo(expected)); } @@ -220,10 +218,7 @@ public class ShardConsumerSubscriberTest { setupNotifierAnswer(10); - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - processedNotifier.wait(5000); - } + startSubscriptionsAndWait(); for (int attempts = 0; attempts < 10; attempts++) { if (subscriber.retrievalFailure() != null) { @@ -236,18 +231,23 @@ public class ShardConsumerSubscriberTest { synchronized (processedNotifier) { assertThat(subscriber.healthCheck(100000), equalTo(expected)); - processedNotifier.wait(5000); + processedNotifier.wait(DEFAULT_NOTIFIER_TIMEOUT); } assertThat(recordsPublisher.restartedFrom, equalTo(edgeRecord)); - verify(shardConsumer, times(20)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(20)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @Test public void restartAfterRequestTimerExpiresTest() throws Exception { - executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); subscriber = new ShardConsumerSubscriber(recordsPublisher, executorService, bufferSize, shardConsumer, 0); addUniqueItem(1); @@ -257,20 +257,20 @@ public class ShardConsumerSubscriberTest { List received = new ArrayList<>(); doAnswer(a -> { - ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); - received.add(input); - if (input.records().stream().anyMatch(r -> 
StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); - } - } - return null; - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); + received.add(input); + if (input.records().stream() + .anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - processedNotifier.wait(5000); - } + startSubscriptionsAndWait(); synchronized (processedNotifier) { executorService.execute(() -> { @@ -290,7 +290,7 @@ public class ShardConsumerSubscriberTest { // // Wait for our blocking thread to control the thread in the executor. // - processedNotifier.wait(5000); + processedNotifier.wait(DEFAULT_NOTIFIER_TIMEOUT); } Stream.iterate(2, i -> i + 1).limit(97).forEach(this::addUniqueItem); @@ -301,22 +301,32 @@ public class ShardConsumerSubscriberTest { assertThat(subscriber.healthCheck(1), nullValue()); barrier.await(500, TimeUnit.MILLISECONDS); - processedNotifier.wait(5000); + processedNotifier.wait(DEFAULT_NOTIFIER_TIMEOUT); } - verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(100)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); assertThat(received.size(), equalTo(recordsPublisher.responses.size())); - Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i), - eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput()))); - + Stream.iterate(0, i -> i + 1) + .limit(received.size()) + .forEach(i -> assertThat( + received.get(i), + eqProcessRecordsInput(recordsPublisher + .responses + .get(i) + .recordsRetrieved + .processRecordsInput()))); } @Test public void restartAfterRequestTimerExpiresWhenNotGettingRecordsAfterInitialization() throws Exception { - - executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); // Mock record publisher which doesn't publish any records on first try which simulates any scenario which // causes first subscription try to fail. 
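
The synchronized/wait blocks that the hunks above collapse into startSubscriptionsAndWait() are a monitor-based handshake between the test thread and the mocked consumer callbacks. Purely as an illustration (this sketch is not part of the patch; the class and member names are invented, and only the 5000 ms timeout mirrors the DEFAULT_NOTIFIER_TIMEOUT constant added above), the pattern looks like this in isolation. Starting the asynchronous work while the monitor is still held is what makes the handshake race-free: the worker cannot enter the synchronized block to call notifyAll() until the test thread has released the monitor inside wait().

import java.util.concurrent.atomic.AtomicInteger;

public class NotifierHandshakeSketch {
    private static final long DEFAULT_NOTIFIER_TIMEOUT = 5000L;

    private final Object processedNotifier = new Object();
    private final AtomicInteger remaining;

    NotifierHandshakeSketch(int expectedEvents) {
        remaining = new AtomicInteger(expectedEvents);
    }

    // Worker-side callback, mirroring the counting Answer the tests install
    // on the shardConsumer mock: the last expected event signals the waiter.
    void onProcessed() {
        if (remaining.decrementAndGet() == 0) {
            synchronized (processedNotifier) {
                processedNotifier.notifyAll();
            }
        }
    }

    // Test-side helper, mirroring startSubscriptionsAndWait(): start the
    // async work under the monitor, then block (bounded) for the signal.
    void startAndWait(Runnable startWork) throws InterruptedException {
        synchronized (processedNotifier) {
            startWork.run();
            processedNotifier.wait(DEFAULT_NOTIFIER_TIMEOUT);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        NotifierHandshakeSketch sketch = new NotifierHandshakeSketch(3);
        sketch.startAndWait(() -> new Thread(() -> {
            for (int i = 0; i < 3; i++) {
                sketch.onProcessed();
            }
        }).start());
        System.out.println("events left unprocessed: " + sketch.remaining.get());
    }
}

The bounded wait is the reason the refactor names the timeout explicitly: if the notification never arrives, the test fails its assertions after at most five seconds instead of hanging the build.
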
@@ -326,24 +336,26 @@ public class ShardConsumerSubscriberTest { List received = new ArrayList<>(); doAnswer(a -> { - ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); - received.add(input); - if (input.records().stream().anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); - } - } - return null; - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); + received.add(input); + if (input.records().stream() + .anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); // First try to start subscriptions. - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - } + startSubscriptionsAndWait(100); // Verifying that there are no interactions with shardConsumer mock indicating no records were sent back and // subscription has not started correctly. - verify(shardConsumer, never()).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, never()) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); Stream.iterate(2, i -> i + 1).limit(98).forEach(this::addUniqueItem); @@ -363,47 +375,59 @@ public class ShardConsumerSubscriberTest { // Verify that received records in the subscriber are equal to the ones sent by the record publisher. assertThat(received.size(), equalTo(recordsPublisher.responses.size())); - Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i), - eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput()))); - + Stream.iterate(0, i -> i + 1) + .limit(received.size()) + .forEach(i -> assertThat( + received.get(i), + eqProcessRecordsInput(recordsPublisher + .responses + .get(i) + .recordsRetrieved + .processRecordsInput()))); } @Test public void restartAfterRequestTimerExpiresWhenInitialTaskExecutionIsRejected() throws Exception { - - executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); ExecutorService failingService = spy(executorService); doAnswer(invocation -> directlyExecuteRunnable(invocation)) .doThrow(new RejectedExecutionException()) .doCallRealMethod() - .when(failingService).execute(any()); + .when(failingService) + .execute(any()); subscriber = new ShardConsumerSubscriber(recordsPublisher, failingService, bufferSize, shardConsumer, 0); addUniqueItem(1); List received = new ArrayList<>(); doAnswer(a -> { - ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); - received.add(input); - if (input.records().stream().anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); - } - } - return null; - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + ProcessRecordsInput input = a.getArgumentAt(0, 
ProcessRecordsInput.class); + received.add(input); + if (input.records().stream() + .anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); // First try to start subscriptions. - synchronized (processedNotifier) { - subscriber.startSubscriptions(); - } + startSubscriptionsAndWait(100); // Verifying that there are no interactions with shardConsumer mock indicating no records were sent back and // subscription has not started correctly. - verify(shardConsumer, never()).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, never()) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); Stream.iterate(2, i -> i + 1).limit(98).forEach(this::addUniqueItem); @@ -419,13 +443,20 @@ public class ShardConsumerSubscriberTest { } // Verify that shardConsumer mock was called 100 times and all 100 input records are processed. - verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(100)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); // Verify that received records in the subscriber are equal to the ones sent by the record publisher. assertThat(received.size(), equalTo(recordsPublisher.responses.size())); - Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i), - eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput()))); - + Stream.iterate(0, i -> i + 1) + .limit(received.size()) + .forEach(i -> assertThat( + received.get(i), + eqProcessRecordsInput(recordsPublisher + .responses + .get(i) + .recordsRetrieved + .processRecordsInput()))); } private Object directlyExecuteRunnable(InvocationOnMock invocation) { @@ -437,8 +468,11 @@ public class ShardConsumerSubscriberTest { private void addUniqueItem(int id) { RecordsRetrieved r = mock(RecordsRetrieved.class, "Record-" + id); - ProcessRecordsInput input = ProcessRecordsInput.builder().cacheEntryTime(Instant.now()) - .records(Collections.singletonList(KinesisClientRecord.builder().partitionKey("Record-" + id).build())) + ProcessRecordsInput input = ProcessRecordsInput.builder() + .cacheEntryTime(Instant.now()) + .records(Collections.singletonList(KinesisClientRecord.builder() + .partitionKey("Record-" + id) + .build())) .build(); when(r.processRecordsInput()).thenReturn(input); recordsPublisher.add(new ResponseItem(r)); @@ -447,9 +481,11 @@ public class ShardConsumerSubscriberTest { private ProcessRecordsInput addTerminalMarker(int id) { RecordsRetrieved terminalResponse = mock(RecordsRetrieved.class, TERMINAL_MARKER + "-" + id); ProcessRecordsInput terminalInput = ProcessRecordsInput.builder() - .records(Collections - .singletonList(KinesisClientRecord.builder().partitionKey(TERMINAL_MARKER + "-" + id).build())) - .cacheEntryTime(Instant.now()).build(); + .records(Collections.singletonList(KinesisClientRecord.builder() + .partitionKey(TERMINAL_MARKER + "-" + id) + .build())) + .cacheEntryTime(Instant.now()) + .build(); when(terminalResponse.processRecordsInput()).thenReturn(terminalInput); recordsPublisher.add(new ResponseItem(terminalResponse)); @@ -457,25 +493,39 @@ public class ShardConsumerSubscriberTest { } private void 
addItemsToReturn(int count) { - Stream.iterate(0, i -> i + 1).limit(count) + Stream.iterate(0, i -> i + 1) + .limit(count) .forEach(i -> recordsPublisher.add(new ResponseItem(recordsRetrieved))); } private void setupNotifierAnswer(int expected) { doAnswer(new Answer() { - int seen = expected; + int seen = expected; - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - seen--; - if (seen == 0) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + seen--; + if (seen == 0) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; } - } - return null; - } - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + } + + private void startSubscriptionsAndWait() throws InterruptedException { + startSubscriptionsAndWait(DEFAULT_NOTIFIER_TIMEOUT); + } + + private void startSubscriptionsAndWait(long timeout) throws InterruptedException { + synchronized (processedNotifier) { + subscriber.startSubscriptions(); + processedNotifier.wait(timeout); + } } private class ResponseItem { @@ -531,10 +581,9 @@ public class ShardConsumerSubscriberTest { } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - - } + public void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended) {} @Override public void restartFrom(RecordsRetrieved recordsRetrieved) { @@ -547,18 +596,13 @@ public class ShardConsumerSubscriberTest { break; } } - } @Override - public void notify(RecordsDeliveryAck ack) { - - } + public void notify(RecordsDeliveryAck ack) {} @Override - public void shutdown() { - - } + public void shutdown() {} @Override public RequestDetails getLastSuccessfulRequestDetails() { @@ -610,7 +654,10 @@ public class ShardConsumerSubscriberTest { private int genericWarningLogged = 0; private int readTimeoutWarningLogged = 0; - TestShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize, + TestShardConsumerSubscriber( + RecordsPublisher recordsPublisher, + ExecutorService executorService, + int bufferSize, ShardConsumer shardConsumer, // Setup test expectations int readTimeoutsToIgnoreBeforeWarning) { @@ -632,8 +679,6 @@ public class ShardConsumerSubscriberTest { /** * Test to validate the warning message from ShardConsumer is not suppressed with the default configuration of 0 - * - * @throws Exception */ @Test public void noLoggingSuppressionNeededOnHappyPathTest() { @@ -643,8 +688,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 0; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -657,8 +702,6 @@ public class ShardConsumerSubscriberTest { /** * Test to validate the warning message from ShardConsumer is not suppressed with the default 
configuration of 0 - * - * @throws Exception */ @Test public void loggingNotSuppressedAfterTimeoutTest() { @@ -671,8 +714,8 @@ int readTimeoutsToIgnore = 0; int expectedReadTimeoutLogs = 2; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -686,8 +729,6 @@ /** * Test to validate the warning message from ShardConsumer is successfully suppressed if we only have intermittent * readTimeouts. - * - * @throws Exception */ @Test public void loggingSuppressedAfterIntermittentTimeoutTest() { @@ -699,8 +740,8 @@ int readTimeoutsToIgnore = 1; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -714,8 +755,6 @@ /** * Test to validate the warning message from ShardConsumer is successfully logged if multiple sequential timeouts * occur. - * - * @throws Exception */ @Test public void loggingPartiallySuppressedAfterMultipleTimeoutTest() { @@ -728,8 +767,8 @@ int readTimeoutsToIgnore = 1; int expectedReadTimeoutLogs = 2; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicException(exceptionToThrow, consumer); mimicException(exceptionToThrow, consumer); @@ -742,8 +781,6 @@ /** * Test to validate the warning message from ShardConsumer is successfully logged if sequential timeouts occur.
- * - * @throws Exception */ @Test public void loggingPartiallySuppressedAfterConsecutiveTimeoutTest() { @@ -757,8 +794,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 2; int expectedReadTimeoutLogs = 3; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicException(exceptionToThrow, consumer); mimicException(exceptionToThrow, consumer); @@ -772,8 +809,6 @@ public class ShardConsumerSubscriberTest { /** * Test to validate the non-timeout warning message from ShardConsumer is not suppressed with the default * configuration of 0 - * - * @throws Exception */ @Test public void loggingNotSuppressedOnNonReadTimeoutExceptionNotIgnoringReadTimeoutsExceptionTest() { @@ -786,8 +821,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 0; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 2; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -801,12 +836,9 @@ public class ShardConsumerSubscriberTest { /** * Test to validate the non-timeout warning message from ShardConsumer is not suppressed with 2 ReadTimeouts to * ignore - * - * @throws Exception */ @Test public void loggingNotSuppressedOnNonReadTimeoutExceptionIgnoringReadTimeoutsTest() { - // We're not throwing a ReadTimeout, so no suppression is expected. 
// The test expects a non-ReadTimeout exception to be thrown on requests 3 and 5, and we expect logs on // each Non-ReadTimeout Exception, no matter what number of ReadTimeoutsToIgnore we pass in, @@ -818,8 +850,8 @@ int readTimeoutsToIgnore = 2; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 2; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -841,5 +873,4 @@ // restart subscriptions to allow further requests to be mimicked consumer.startSubscriptions(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java index 46677fb9..daab8efe 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java @@ -15,34 +15,13 @@ package software.amazon.kinesis.lifecycle; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - import java.time.Instant; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -51,8 +30,11 @@ import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import lombok.extern.slf4j.Slf4j; import org.junit.After; import org.junit.Before; import org.junit.Ignore; @@ -60,26 +42,50 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; +import org.mockito.MockitoAnnotations; import org.mockito.invocation.InvocationOnMock; import
org.mockito.runners.MockitoJUnitRunner; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.RequestDetails; import software.amazon.kinesis.leases.ShardInfo; +import software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; -import software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; import software.amazon.kinesis.retrieval.RecordsDeliveryAck; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + /** * Unit tests of {@link ShardConsumer}. 
*/ @@ -98,42 +104,61 @@ public class ShardConsumerTest { private TaskExecutionListenerInput shutdownRequestedAwaitTaskInput; private ExecutorService executorService; + @Mock private RecordsPublisher recordsPublisher; + @Mock private ShutdownNotification shutdownNotification; + @Mock private ConsumerState blockedOnParentsState; + @Mock private ConsumerTask blockedOnParentsTask; + @Mock private ConsumerState initialState; + @Mock private ConsumerTask initializeTask; + @Mock private ConsumerState processingState; + @Mock private ConsumerTask processingTask; + @Mock private ConsumerState shutdownState; + @Mock private ConsumerTask shutdownTask; + @Mock private TaskResult initializeTaskResult; + @Mock private TaskResult processingTaskResult; + @Mock private TaskResult blockOnParentsTaskResult; + @Mock private ConsumerState shutdownCompleteState; + @Mock private ShardConsumerArgument shardConsumerArgument; + @Mock private ConsumerState shutdownRequestedState; + @Mock private ConsumerTask shutdownRequestedTask; + @Mock private ConsumerState shutdownRequestedAwaitState; + @Mock private TaskExecutionListener taskExecutionListener; @@ -146,28 +171,46 @@ public class ShardConsumerTest { @Before public void before() { + MockitoAnnotations.initMocks(this); shardInfo = new ShardInfo(shardId, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("test-" + testName.getMethodName() + "-%04d") - .setDaemon(true).build(); + ThreadFactory factory = new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build(); executorService = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), factory); - processRecordsInput = ProcessRecordsInput.builder().isAtShardEnd(false).cacheEntryTime(Instant.now()) - .millisBehindLatest(1000L).records(Collections.emptyList()).build(); - initialTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo).taskType(TaskType.INITIALIZE) + processRecordsInput = ProcessRecordsInput.builder() + .isAtShardEnd(false) + .cacheEntryTime(Instant.now()) + .millisBehindLatest(1000L) + .records(Collections.emptyList()) .build(); - processTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo).taskType(TaskType.PROCESS).build(); - shutdownRequestedTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN_NOTIFICATION).build(); - shutdownRequestedAwaitTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN_COMPLETE).build(); - shutdownTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo).taskType(TaskType.SHUTDOWN) + initialTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.INITIALIZE) + .build(); + processTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.PROCESS) + .build(); + shutdownRequestedTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.SHUTDOWN_NOTIFICATION) + .build(); + shutdownRequestedAwaitTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.SHUTDOWN_COMPLETE) + .build(); + shutdownTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.SHUTDOWN) .build(); } @After public void after() { List remainder = executorService.shutdownNow(); - assertThat(remainder.isEmpty(), equalTo(true)); + 
assertTrue(remainder.isEmpty()); } private class TestPublisher implements RecordsPublisher { @@ -184,32 +227,31 @@ public class ShardConsumerTest { TestPublisher(boolean enableCancelAwait) { doAnswer(a -> { - requestBarrier.await(); - return null; - }).when(subscription).request(anyLong()); + requestBarrier.await(); + return null; + }) + .when(subscription) + .request(anyLong()); doAnswer(a -> { - if (enableCancelAwait) { - requestBarrier.await(); - } - return null; - }).when(subscription).cancel(); + if (enableCancelAwait) { + requestBarrier.await(); + } + return null; + }) + .when(subscription) + .cancel(); } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - - } + public void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended) {} @Override - public void notify(RecordsDeliveryAck ack) { - - } + public void notify(RecordsDeliveryAck ack) {} @Override - public void shutdown() { - - } + public void shutdown() {} @Override public RequestDetails getLastSuccessfulRequestDetails() { @@ -228,9 +270,7 @@ public class ShardConsumerTest { } @Override - public void restartFrom(RecordsRetrieved recordsRetrieved) { - - } + public void restartFrom(RecordsRetrieved recordsRetrieved) {} public void awaitSubscription() throws InterruptedException, BrokenBarrierException { barrier.await(); @@ -267,8 +307,7 @@ public class ShardConsumerTest { mockSuccessfulShutdown(null); TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(cache); boolean initComplete = false; while (!initComplete) { @@ -299,9 +338,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); @@ -321,8 +364,7 @@ public class ShardConsumerTest { mockSuccessfulShutdown(null); TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(cache); boolean initComplete = false; while (!initComplete) { @@ -341,7 +383,7 @@ public class ShardConsumerTest { // This will block if a lock is held on ShardConsumer#this // consumer.executeLifecycle(); - assertThat(consumer.isShutdown(), equalTo(false)); + assertFalse(consumer.isShutdown()); log.debug("Release 
processing task interlock"); awaitAndResetBarrier(processingTaskInterlock); @@ -358,9 +400,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(1)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(1)).afterTaskExecution(processTaskInput); @@ -370,7 +416,6 @@ public class ShardConsumerTest { @Test public void testDataArrivesAfterProcessing2() throws Exception { - CyclicBarrier taskCallBarrier = new CyclicBarrier(2); mockSuccessfulInitialize(null); @@ -380,8 +425,7 @@ public class ShardConsumerTest { mockSuccessfulShutdown(null); TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(cache); boolean initComplete = false; while (!initComplete) { @@ -425,9 +469,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(3)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(3)).afterTaskExecution(processTaskInput); @@ -435,15 +483,13 @@ public class ShardConsumerTest { verifyNoMoreInteractions(taskExecutionListener); } - @SuppressWarnings("unchecked") @Test @Ignore public final void testInitializationStateUponFailure() throws Exception { - ShardConsumer consumer = new ShardConsumer(recordsPublisher, executorService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, initialState, Function.identity(), 1, - taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(recordsPublisher); - when(initialState.createTask(eq(shardConsumerArgument), eq(consumer), any())).thenReturn(initializeTask); + when(initialState.createTask(eq(shardConsumerArgument), eq(consumer), any())) + .thenReturn(initializeTask); when(initializeTask.call()).thenReturn(new TaskResult(new Exception("Bad"))); when(initializeTask.taskType()).thenReturn(TaskType.INITIALIZE); 
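
The other synchronization idiom running through these hunks is the CyclicBarrier interlock: a two-party barrier threaded into a mocked task so the test can freeze the task at a known point (the "Release processing task interlock" step above), with awaitAndResetBarrier() rearming the barrier after each rendezvous. The sketch below is not part of the patch and uses invented names; it assumes only Mockito and the JDK. It holds a mocked Callable between an "arrive" and a "depart" barrier so in-flight state can be asserted deterministically.

import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class BarrierInterlockSketch {

    // Mirrors awaitAndResetBarrier(): meet the other thread, then rearm the
    // barrier so the same instance can gate the next checkpoint.
    static void awaitAndReset(CyclicBarrier barrier) throws Exception {
        barrier.await();
        barrier.reset();
    }

    public static void main(String[] args) throws Exception {
        CyclicBarrier taskArriveBarrier = new CyclicBarrier(2);
        CyclicBarrier taskDepartBarrier = new CyclicBarrier(2);

        @SuppressWarnings("unchecked")
        Callable<Boolean> task = mock(Callable.class);
        when(task.call()).thenAnswer(invocation -> {
            taskArriveBarrier.await(); // checkpoint 1: the task is provably in flight
            taskDepartBarrier.await(); // checkpoint 2: held until the test releases it
            return true;
        });

        ExecutorService executorService = Executors.newSingleThreadExecutor();
        Future<Boolean> result = executorService.submit(task);

        awaitAndReset(taskArriveBarrier);
        // The mocked task is now frozen between the two barriers; this is
        // where the real tests assert intermediate state, e.g. that a task
        // running time is reported while a task executes.
        awaitAndReset(taskDepartBarrier);

        System.out.println("task result: " + result.get());
        executorService.shutdownNow();
    }
}

Resetting each barrier after the rendezvous is what lets one test walk a task through several controlled pauses, as testLongRunningTasks does further down with its taskArriveBarrier/taskDepartBarrier pair.
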
when(initialState.failureTransition()).thenReturn(initialState); @@ -468,17 +514,13 @@ public class ShardConsumerTest { /** * Test method to verify consumer undergoes the transition WAITING_ON_PARENT_SHARDS -> INITIALIZING -> PROCESSING */ - @SuppressWarnings("unchecked") @Test - public final void testSuccessfulConsumerStateTransition() throws Exception { + public final void testSuccessfulConsumerStateTransition() { ExecutorService directExecutorService = spy(executorService); - doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .when(directExecutorService).execute(any()); + doAnswer(this::directlyExecuteRunnable).when(directExecutorService).execute(any()); - ShardConsumer consumer = new ShardConsumer(recordsPublisher, directExecutorService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, blockedOnParentsState, - t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(directExecutorService, blockedOnParentsState); mockSuccessfulUnblockOnParents(); mockSuccessfulInitializeWithFailureTransition(); @@ -494,7 +536,9 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.PROCESSING.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.PROCESSING.consumerState().state(), + consumer.currentState().state()); verify(directExecutorService, times(2)).execute(any()); } @@ -502,22 +546,20 @@ public class ShardConsumerTest { * Test method to verify consumer does not transition to PROCESSING from WAITING_ON_PARENT_SHARDS when * INITIALIZING tasks gets rejected. */ - @SuppressWarnings("unchecked") @Test public final void testConsumerNotTransitionsToProcessingWhenInitializationFails() { ExecutorService failingService = spy(executorService); - ShardConsumer consumer = new ShardConsumer(recordsPublisher, failingService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, blockedOnParentsState, - t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(failingService, blockedOnParentsState); mockSuccessfulUnblockOnParents(); mockSuccessfulInitializeWithFailureTransition(); mockSuccessfulProcessing(null); // Failing the initialization task and all other attempts after that. - doAnswer(invocation -> directlyExecuteRunnable(invocation)) + doAnswer(this::directlyExecuteRunnable) .doThrow(new RejectedExecutionException()) - .when(failingService).execute(any()); + .when(failingService) + .execute(any()); int arbitraryExecutionCount = 5; do { @@ -529,7 +571,9 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.INITIALIZING.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.INITIALIZING.consumerState().state(), + consumer.currentState().state()); verify(failingService, times(5)).execute(any()); } @@ -537,25 +581,23 @@ public class ShardConsumerTest { * Test method to verify consumer transition to PROCESSING from WAITING_ON_PARENT_SHARDS with * intermittent INITIALIZING task rejections. 
*/ - @SuppressWarnings("unchecked") @Test public final void testConsumerTransitionsToProcessingWithIntermittentInitializationFailures() { ExecutorService failingService = spy(executorService); - ShardConsumer consumer = new ShardConsumer(recordsPublisher, failingService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, blockedOnParentsState, - t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(failingService, blockedOnParentsState); mockSuccessfulUnblockOnParents(); mockSuccessfulInitializeWithFailureTransition(); mockSuccessfulProcessing(null); // Failing the initialization task and few other attempts after that. - doAnswer(invocation -> directlyExecuteRunnable(invocation)) + doAnswer(this::directlyExecuteRunnable) .doThrow(new RejectedExecutionException()) .doThrow(new RejectedExecutionException()) .doThrow(new RejectedExecutionException()) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .when(failingService).execute(any()); + .doAnswer(this::directlyExecuteRunnable) + .when(failingService) + .execute(any()); int arbitraryExecutionCount = 6; do { @@ -567,27 +609,25 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.PROCESSING.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.PROCESSING.consumerState().state(), + consumer.currentState().state()); verify(failingService, times(5)).execute(any()); } /** * Test method to verify consumer does not transition to INITIALIZING when WAITING_ON_PARENT_SHARDS task rejected. */ - @SuppressWarnings("unchecked") @Test public final void testConsumerNotTransitionsToInitializingWhenWaitingOnParentsFails() { ExecutorService failingService = spy(executorService); - ShardConsumer consumer = new ShardConsumer(recordsPublisher, failingService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, blockedOnParentsState, - t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(failingService, blockedOnParentsState); mockSuccessfulUnblockOnParentsWithFailureTransition(); mockSuccessfulInitializeWithFailureTransition(); // Failing the waiting_on_parents task and few other attempts after that. - doThrow(new RejectedExecutionException()) - .when(failingService).execute(any()); + doThrow(new RejectedExecutionException()).when(failingService).execute(any()); int arbitraryExecutionCount = 5; do { @@ -599,20 +639,19 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState().state(), + consumer.currentState().state()); verify(failingService, times(5)).execute(any()); } /** * Test method to verify consumer stays in INITIALIZING state when InitializationTask fails. 
*/ - @SuppressWarnings("unchecked") @Test(expected = RejectedExecutionException.class) public final void testInitializationStateUponSubmissionFailure() throws Exception { - ExecutorService failingService = mock(ExecutorService.class); - ShardConsumer consumer = new ShardConsumer(recordsPublisher, failingService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, initialState, t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(failingService, initialState); doThrow(new RejectedExecutionException()).when(failingService).execute(any()); @@ -625,8 +664,7 @@ public class ShardConsumerTest { @Test public void testErrorThrowableInInitialization() throws Exception { - ShardConsumer consumer = new ShardConsumer(recordsPublisher, executorService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, initialState, t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(recordsPublisher); when(initialState.createTask(any(), any(), any())).thenReturn(initializeTask); when(initialState.taskType()).thenReturn(TaskType.INITIALIZE); @@ -645,12 +683,10 @@ public class ShardConsumerTest { @Test public void testRequestedShutdownWhileQuiet() throws Exception { - CyclicBarrier taskBarrier = new CyclicBarrier(2); TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, t -> t, 1, taskExecutionListener, 0); + final ShardConsumer consumer = createShardConsumer(cache); mockSuccessfulInitialize(null); @@ -664,12 +700,14 @@ public class ShardConsumerTest { when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.REQUESTED))) .thenReturn(shutdownRequestedAwaitState); - when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))).thenReturn(shutdownState); + when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))) + .thenReturn(shutdownState); when(shutdownRequestedAwaitState.requiresDataAvailability()).thenReturn(false); when(shutdownRequestedAwaitState.createTask(any(), any(), any())).thenReturn(null); when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.REQUESTED))) .thenReturn(shutdownRequestedState); - when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))).thenReturn(shutdownState); + when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))) + .thenReturn(shutdownState); when(shutdownRequestedAwaitState.taskType()).thenReturn(TaskType.SHUTDOWN_COMPLETE); mockSuccessfulShutdown(null); @@ -692,15 +730,15 @@ public class ShardConsumerTest { consumer.gracefulShutdown(shutdownNotification); boolean shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(false)); + assertFalse(shutdownComplete); shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(false)); + assertFalse(shutdownComplete); consumer.leaseLost(); shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(false)); + assertFalse(shutdownComplete); shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(true)); + assertTrue(shutdownComplete); verify(processingState, times(2)).createTask(any(), any(), any()); verify(shutdownRequestedState, never()).shutdownTransition(eq(ShutdownReason.LEASE_LOST)); @@ -714,12 +752,21 @@ public class ShardConsumerTest { 
verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownRequestedAwaitTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownRequestedTaskInput = shutdownRequestedTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownRequestedTaskInput = shutdownRequestedTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); // No task is created/run for this shutdownRequestedAwaitState, so there's no task outcome. + // shutdownNotification.shutdownComplete() should only be called for gracefulShutdown + verify(shutdownNotification, times(1)).shutdownComplete(); + verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownRequestedTaskInput); @@ -732,8 +779,17 @@ public class ShardConsumerTest { public void testExceptionInProcessingStopsRequests() throws Exception { TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, Optional.of(1L), - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + ShardConsumer consumer = new ShardConsumer( + cache, + executorService, + shardInfo, + Optional.of(1L), + shardConsumerArgument, + initialState, + Function.identity(), + 1, + taskExecutionListener, + 0); mockSuccessfulInitialize(null); mockSuccessfulProcessing(null); @@ -768,7 +824,8 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(processTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verifyNoMoreInteractions(taskExecutionListener); @@ -776,11 +833,19 @@ public class ShardConsumerTest { @Test public void testLongRunningTasks() throws Exception { - TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, Optional.of(1L), - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + ShardConsumer consumer = new ShardConsumer( + cache, + executorService, + shardInfo, + Optional.of(1L), + shardConsumerArgument, + initialState, + Function.identity(), + 1, + taskExecutionListener, + 0); CyclicBarrier taskArriveBarrier = new CyclicBarrier(2); CyclicBarrier taskDepartBarrier = new CyclicBarrier(2); @@ -792,19 +857,19 @@ public class ShardConsumerTest { CompletableFuture initSuccess = consumer.initializeComplete(); awaitAndResetBarrier(taskArriveBarrier); - assertThat(consumer.taskRunningTime(), notNullValue()); + 
assertNotNull(consumer.taskRunningTime()); consumer.healthCheck(); awaitAndResetBarrier(taskDepartBarrier); - assertThat(initSuccess.get(), equalTo(false)); + assertFalse(initSuccess.get()); verify(initializeTask).call(); initSuccess = consumer.initializeComplete(); verify(initializeTask).call(); - assertThat(initSuccess.get(), equalTo(true)); + assertTrue(initSuccess.get()); consumer.healthCheck(); - assertThat(consumer.taskRunningTime(), nullValue()); + assertNull(consumer.taskRunningTime()); consumer.subscribe(); cache.awaitInitialSetup(); @@ -813,14 +878,14 @@ public class ShardConsumerTest { awaitAndResetBarrier(taskArriveBarrier); Instant previousTaskStartTime = consumer.taskDispatchedAt(); - assertThat(consumer.taskRunningTime(), notNullValue()); + assertNotNull(consumer.taskRunningTime()); consumer.healthCheck(); awaitAndResetBarrier(taskDepartBarrier); consumer.healthCheck(); cache.requestBarrier.await(); - assertThat(consumer.taskRunningTime(), nullValue()); + assertNull(consumer.taskRunningTime()); cache.requestBarrier.reset(); // Sleep for 10 millis before processing next task. If we don't; then the following @@ -831,37 +896,41 @@ public class ShardConsumerTest { awaitAndResetBarrier(taskArriveBarrier); Instant currentTaskStartTime = consumer.taskDispatchedAt(); - assertThat(currentTaskStartTime, not(equalTo(previousTaskStartTime))); + assertNotEquals(currentTaskStartTime, previousTaskStartTime); awaitAndResetBarrier(taskDepartBarrier); cache.requestBarrier.await(); - assertThat(consumer.taskRunningTime(), nullValue()); + assertNull(consumer.taskRunningTime()); cache.requestBarrier.reset(); consumer.leaseLost(); - assertThat(consumer.isShutdownRequested(), equalTo(true)); + assertTrue(consumer.isShutdownRequested()); CompletableFuture shutdownComplete = consumer.shutdownComplete(); awaitAndResetBarrier(taskArriveBarrier); - assertThat(consumer.taskRunningTime(), notNullValue()); + assertNotNull(consumer.taskRunningTime()); awaitAndResetBarrier(taskDepartBarrier); - assertThat(shutdownComplete.get(), equalTo(false)); + assertFalse(shutdownComplete.get()); shutdownComplete = consumer.shutdownComplete(); - assertThat(shutdownComplete.get(), equalTo(true)); + assertTrue(shutdownComplete.get()); - assertThat(consumer.taskRunningTime(), nullValue()); + assertNull(consumer.taskRunningTime()); consumer.healthCheck(); verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); @@ -869,6 +938,126 @@ public class ShardConsumerTest { verifyNoMoreInteractions(taskExecutionListener); } + @Test + public void testEmptyShardProcessingRaceCondition() throws Exception { + final RecordsPublisher 
mockPublisher = mock(RecordsPublisher.class); + final ExecutorService mockExecutor = mock(ExecutorService.class); + final ConsumerState mockState = mock(ConsumerState.class); + final ShardConsumer consumer = new ShardConsumer( + mockPublisher, + mockExecutor, + shardInfo, + Optional.of(1L), + shardConsumerArgument, + mockState, + Function.identity(), + 1, + taskExecutionListener, + 0); + + when(mockState.state()).thenReturn(ShardConsumerState.WAITING_ON_PARENT_SHARDS); + when(mockState.taskType()).thenReturn(TaskType.BLOCK_ON_PARENT_SHARDS); + final ConsumerTask mockTask = mock(ConsumerTask.class); + when(mockState.createTask(any(), any(), any())).thenReturn(mockTask); + // Simulate successful BlockedOnParent task execution + // and successful Initialize task execution + when(mockTask.call()).thenReturn(new TaskResult(false)); + + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to initiate async" + + " processing of blocked on parent task"); + consumer.executeLifecycle(); + final ArgumentCaptor taskToExecute = ArgumentCaptor.forClass(Runnable.class); + verify(mockExecutor, timeout(100)).execute(taskToExecute.capture()); + taskToExecute.getValue().run(); + log.info("RecordProcessor Thread: Simulated successful execution of Blocked on parent task"); + reset(mockExecutor); + + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to move to InitializingState" + + " and initiate async processing of initialize task"); + when(mockState.successTransition()).thenReturn(mockState); + when(mockState.state()).thenReturn(ShardConsumerState.INITIALIZING); + when(mockState.taskType()).thenReturn(TaskType.INITIALIZE); + consumer.executeLifecycle(); + verify(mockExecutor, timeout(100)).execute(taskToExecute.capture()); + log.info("RecordProcessor Thread: Simulated successful execution of Initialize task"); + taskToExecute.getValue().run(); + + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to move to ProcessingState" + + " and mark initialization future as complete"); + when(mockState.state()).thenReturn(ShardConsumerState.PROCESSING); + consumer.executeLifecycle(); + + // Simulate the race where + // scheduler invokes executeLifecycle which performs Publisher.subscribe(subscriber) + // on recordProcessor thread + // but before scheduler thread finishes initialization, handleInput is invoked + // on record processor thread. + + // Since ShardConsumer creates its own instance of subscriber that cannot be mocked + // this test sequence will appear a little odd. 
+ // In order to control the order in which execution occurs, let's first invoke + // handleInput. Although this ordering would never happen in reality, there isn't a way + // to control the precise timing of the thread execution, so this is the best way to stage it. + final CountDownLatch processTaskLatch = new CountDownLatch(1); + new Thread(() -> { + reset(mockState); + when(mockState.taskType()).thenReturn(TaskType.PROCESS); + final ConsumerTask mockProcessTask = mock(ConsumerTask.class); + when(mockState.createTask(any(), any(), any())).thenReturn(mockProcessTask); + when(mockProcessTask.call()).then(input -> { + // first we want to wait for subscribe to be called, + // but we cannot control the timing, so wait for 10 seconds + // to let the main thread invoke executeLifecycle which + // will perform subscribe + processTaskLatch.countDown(); + log.info("Record Processor Thread: Holding shardConsumer lock, waiting for 10 seconds to" + " let subscribe be called by scheduler thread"); + Thread.sleep(10 * 1000); + log.info("RecordProcessor Thread: Done waiting"); + // then return shard end result + log.info( + "RecordProcessor Thread: Simulating execution of ProcessTask and returning shard-end result"); + return new TaskResult(true); + }); + final Subscription mockSubscription = mock(Subscription.class); + consumer.handleInput( + ProcessRecordsInput.builder().isAtShardEnd(true).build(), mockSubscription); + }) + .start(); + + processTaskLatch.await(); + + // invoke executeLifecycle, which should invoke subscribe + // meanwhile, if the scheduler tries to acquire the ShardConsumer lock, it will + // be blocked during initialization processing because handleInput was + // already invoked and will be holding the lock, thereby creating the + // race condition we want. + reset(mockState); + AtomicBoolean successTransitionCalled = new AtomicBoolean(false); + when(mockState.successTransition()).then(input -> { + successTransitionCalled.set(true); + return mockState; + }); + AtomicBoolean shutdownTransitionCalled = new AtomicBoolean(false); + when(mockState.shutdownTransition(any())).then(input -> { + shutdownTransitionCalled.set(true); + return mockState; + }); + when(mockState.state()).then(input -> { + if (successTransitionCalled.get() && shutdownTransitionCalled.get()) { + return ShardConsumerState.SHUTTING_DOWN; + } + return ShardConsumerState.PROCESSING; + }); + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to invoke subscribe and" + " complete initialization"); + consumer.executeLifecycle(); + log.info("Scheduler Thread: Done initializing the ShardConsumer"); + + log.info("Verifying scheduler did not perform shutdown transition during initialization"); + verify(mockState, times(0)).shutdownTransition(any()); + } + private void mockSuccessfulShutdown(CyclicBarrier taskCallBarrier) { mockSuccessfulShutdown(taskCallBarrier, null); } @@ -893,7 +1082,8 @@ } private void mockSuccessfulProcessing(CyclicBarrier taskCallBarrier, CyclicBarrier taskInterlockBarrier) { - when(processingState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(processingTask); + when(processingState.createTask(eq(shardConsumerArgument), any(), any())) + .thenReturn(processingTask); when(processingState.requiresDataAvailability()).thenReturn(true); when(processingState.taskType()).thenReturn(TaskType.PROCESS); when(processingTask.taskType()).thenReturn(TaskType.PROCESS); @@ -918,7 +1108,6 @@ } private void mockSuccessfulInitialize(CyclicBarrier
taskCallBarrier, CyclicBarrier taskInterlockBarrier) { - when(initialState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(initializeTask); when(initialState.taskType()).thenReturn(TaskType.INITIALIZE); when(initializeTask.taskType()).thenReturn(TaskType.INITIALIZE); @@ -931,7 +1120,6 @@ public class ShardConsumerTest { when(initialState.requiresDataAvailability()).thenReturn(false); when(initialState.successTransition()).thenReturn(processingState); when(initialState.state()).thenReturn(ConsumerStates.ShardConsumerState.INITIALIZING); - } private void mockSuccessfulUnblockOnParentsWithFailureTransition() { @@ -940,7 +1128,8 @@ public class ShardConsumerTest { } private void mockSuccessfulUnblockOnParents() { - when(blockedOnParentsState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(blockedOnParentsTask); + when(blockedOnParentsState.createTask(eq(shardConsumerArgument), any(), any())) + .thenReturn(blockedOnParentsTask); when(blockedOnParentsState.taskType()).thenReturn(TaskType.BLOCK_ON_PARENT_SHARDS); when(blockedOnParentsTask.taskType()).thenReturn(TaskType.BLOCK_ON_PARENT_SHARDS); when(blockedOnParentsTask.call()).thenAnswer(i -> blockOnParentsTaskResult); @@ -968,4 +1157,26 @@ public class ShardConsumerTest { return null; } + private ShardConsumer createShardConsumer(final RecordsPublisher publisher) { + return createShardConsumer(publisher, executorService, initialState); + } + + private ShardConsumer createShardConsumer(final ExecutorService executorService, final ConsumerState state) { + return createShardConsumer(recordsPublisher, executorService, state); + } + + private ShardConsumer createShardConsumer( + final RecordsPublisher publisher, final ExecutorService executorService, final ConsumerState state) { + return new ShardConsumer( + publisher, + executorService, + shardInfo, + logWarningForTaskAfterMillis, + shardConsumerArgument, + state, + Function.identity(), + 1, + taskExecutionListener, + 0); + } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java index ce026f1d..f216a09a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.lifecycle; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import org.junit.Assert; import org.junit.Test; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + /** * Unit tests of ShutdownReason enum class. 
*/ @@ -42,5 +42,4 @@ public class ShutdownReasonTest { assertTrue(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.LEASE_LOST)); assertTrue(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.SHARD_END)); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java index 6617984d..db64d198 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java @@ -14,32 +14,20 @@ */ package software.amazon.kinesis.lifecycle; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.lifecycle.ShutdownTask.RETRY_RANDOM_MAX_RANGE; - -import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; -import java.util.UUID; +import java.util.Set; -import com.google.common.collect.ImmutableList; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Matchers; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; import software.amazon.kinesis.common.InitialPositionInStream; @@ -58,8 +46,8 @@ import software.amazon.kinesis.leases.ShardObjectHelper; import software.amazon.kinesis.leases.UpdateField; import software.amazon.kinesis.leases.exceptions.CustomerApplicationException; import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.InvalidStateException; +import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; @@ -70,6 +58,22 @@ import software.amazon.kinesis.processor.ShardRecordProcessor; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.lifecycle.ShutdownReason.LEASE_LOST; +import static 
software.amazon.kinesis.lifecycle.ShutdownReason.SHARD_END; + /** * */ @@ -78,53 +82,65 @@ public class ShutdownTaskTest { private static final long TASK_BACKOFF_TIME_MILLIS = 1L; private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final ShutdownReason SHARD_END_SHUTDOWN_REASON = ShutdownReason.SHARD_END; - private static final ShutdownReason LEASE_LOST_SHUTDOWN_REASON = ShutdownReason.LEASE_LOST; private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); - private final String concurrencyToken = "0-1-2-3-4"; - private final String shardId = "shardId-0"; - private boolean cleanupLeasesOfCompletedShards = false; - private boolean ignoreUnexpectedChildShards = false; - private ShardInfo shardInfo; + private static final StreamIdentifier STREAM_IDENTIFIER = StreamIdentifier.singleStreamInstance("streamName"); + + /** + * Shard id for the default-provided {@link ShardInfo} and {@link Lease}. + */ + private static final String SHARD_ID = "shardId-0"; + + private static final ShardInfo SHARD_INFO = + new ShardInfo(SHARD_ID, "concurrencyToken", Collections.emptySet(), ExtendedSequenceNumber.LATEST); + private ShutdownTask task; - private StreamIdentifier streamIdentifier = StreamIdentifier.singleStreamInstance("streamName"); - + @Mock private RecordsPublisher recordsPublisher; + @Mock private ShardRecordProcessorCheckpointer recordProcessorCheckpointer; + @Mock private Checkpointer checkpointer; + @Mock private LeaseRefresher leaseRefresher; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private ShardDetector shardDetector; + @Mock private HierarchicalShardSyncer hierarchicalShardSyncer; + @Mock private ShardRecordProcessor shardRecordProcessor; + @Mock private LeaseCleanupManager leaseCleanupManager; + @Mock + private ShutdownNotification shutdownNotification; + @Before public void setUp() throws Exception { - doNothing().when(recordsPublisher).shutdown(); when(recordProcessorCheckpointer.checkpointer()).thenReturn(checkpointer); + when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); final Lease childLease = new Lease(); childLease.leaseKey("childShardLeaseKey"); - when(hierarchicalShardSyncer.createLeaseForChildShard(Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) + when(hierarchicalShardSyncer.createLeaseForChildShard( + Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) .thenReturn(childLease); + setupLease(SHARD_ID, Collections.emptyList()); - shardInfo = new ShardInfo(shardId, concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); + when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); + when(shardDetector.streamIdentifier()).thenReturn(STREAM_IDENTIFIER); - task = new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, constructChildShards(), streamIdentifier, leaseCleanupManager); + task = createShutdownTask(SHARD_END, constructChildrenFromSplit()); } /** @@ -132,13 +148,8 @@ public class ShutdownTaskTest { * This test is for the scenario that customer doesn't implement checkpoint in their implementation */ @Test - public 
final void testCallWhenApplicationDoesNotCheckpoint() throws Exception { + public final void testCallWhenApplicationDoesNotCheckpoint() { when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(new ExtendedSequenceNumber("3298")); - Lease heldLease = LeaseHelper.createLease("shardId-0", "leaseOwner", Collections.singleton("parentShardId"), Collections.emptyList(), ExtendedSequenceNumber.LATEST); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-0")).thenReturn(heldLease); - when(leaseRefresher.getLease("shardId-0")).thenReturn(heldLease); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(leaseCoordinator.updateLease(Matchers.any(Lease.class), Matchers.any(UUID.class), Matchers.anyString(), Matchers.anyString())).thenReturn(true); final TaskResult result = task.call(); assertNotNull(result.getException()); @@ -151,17 +162,14 @@ public class ShutdownTaskTest { */ @Test public final void testCallWhenCreatingNewLeasesThrows() throws Exception { - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); - Lease heldLease = LeaseHelper.createLease("shardId-0", "leaseOwner", Collections.singleton("parentShardId")); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-0")).thenReturn(heldLease); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(hierarchicalShardSyncer.createLeaseForChildShard(Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) + when(hierarchicalShardSyncer.createLeaseForChildShard( + Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) .thenThrow(new InvalidStateException("InvalidStateException is thrown")); final TaskResult result = task.call(); assertNull(result.getException()); verify(recordsPublisher).shutdown(); - verify(shardRecordProcessor, never()).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); + verify(shardRecordProcessor, never()).shardEnded(any(ShardEndedInput.class)); verify(shardRecordProcessor).leaseLost(LeaseLostInput.builder().build()); verify(leaseCoordinator).dropLease(Matchers.any(Lease.class)); } @@ -171,146 +179,112 @@ public class ShutdownTaskTest { * This test is for the scenario that ShutdownTask is created for ShardConsumer reaching the Shard End. 
*/ @Test - public final void testCallWhenTrueShardEnd() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); - task = new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, constructChildShards(), streamIdentifier, leaseCleanupManager); - - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); - Lease heldLease = LeaseHelper.createLease("shardId-0", "leaseOwner", Collections.singleton("parentShardId")); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-0")).thenReturn(heldLease); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(leaseCoordinator.updateLease(Matchers.any(Lease.class), Matchers.any(UUID.class), Matchers.anyString(), Matchers.anyString())).thenReturn(true); - when(leaseRefresher.getLease("shardId-0")).thenReturn(heldLease); - + public final void testCallWhenTrueShardEnd() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { final TaskResult result = task.call(); assertNull(result.getException()); - verify(recordsPublisher).shutdown(); - verify(shardRecordProcessor).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - verify(shardRecordProcessor, never()).leaseLost(LeaseLostInput.builder().build()); + verifyShutdownAndNoDrop(); + verify(shardRecordProcessor) + .shardEnded(ShardEndedInput.builder() + .checkpointer(recordProcessorCheckpointer) + .build()); verify(leaseRefresher).updateLeaseWithMetaInfo(Matchers.any(Lease.class), Matchers.any(UpdateField.class)); verify(leaseRefresher, times(2)).createLeaseIfNotExists(Matchers.any(Lease.class)); - verify(leaseCoordinator, never()).dropLease(Matchers.any(Lease.class)); - verify(leaseCleanupManager, times(1)).enqueueForDeletion(any(LeasePendingDeletion.class)); + verify(leaseCleanupManager).enqueueForDeletion(any(LeasePendingDeletion.class)); } + /** + * Tests the scenario when one, but not both, parent shards are accessible. + * This test should drop the lease so another worker can make an attempt. 
+ */ @Test - public final void testCallThrowsUntilParentInfoNotPresentInLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); - Lease heldLease = LeaseHelper.createLease("shardId-0", "leaseOwner", ImmutableList.of("parent1", "parent2")); - Lease parentLease = LeaseHelper.createLease("shardId-1", "leaseOwner", Collections.emptyList()); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-0")).thenReturn(heldLease); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-1")) - .thenReturn(null, null, null, null, null, parentLease); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(leaseCoordinator.updateLease(Matchers.any(Lease.class), Matchers.any(UUID.class), Matchers.anyString(), Matchers.anyString())).thenReturn(true); - when(leaseRefresher.getLease("shardId-0")).thenReturn(heldLease); - // Return null lease first time to simulate partial parent lease info - when(leaseRefresher.getLease("shardId-1")) - .thenReturn(null, null, null, null, null, parentLease); - - // Make first 5 attempts with partial parent info in lease table - for (int i = 0; i < 5; i++) { - ShutdownTask task = spy(new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, constructChildShard(), streamIdentifier, leaseCleanupManager)); - when(task.isOneInNProbability(RETRY_RANDOM_MAX_RANGE)).thenReturn(false); - TaskResult result = task.call(); - assertNotNull(result.getException()); - assertTrue(result.getException() instanceof BlockedOnParentShardException); - assertTrue(result.getException().getMessage().contains("has partial parent information in lease table")); - verify(recordsPublisher, never()).shutdown(); - verify(shardRecordProcessor, never()) - .shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - verify(shardRecordProcessor, never()).leaseLost(LeaseLostInput.builder().build()); - verify(leaseCoordinator, never()) - .updateLease(Matchers.any(Lease.class), Matchers.any(UUID.class), Matchers.anyString(), Matchers.anyString()); - verify(leaseRefresher, never()).createLeaseIfNotExists(Matchers.any(Lease.class)); - verify(task, times(1)).isOneInNProbability(RETRY_RANDOM_MAX_RANGE); - verify(leaseCoordinator, never()).dropLease(Matchers.any(Lease.class)); - verify(leaseCleanupManager, never()).enqueueForDeletion(any(LeasePendingDeletion.class)); - } - - // make next attempt with complete parent info in lease table - ShutdownTask task = spy(new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, constructChildShard(), streamIdentifier, leaseCleanupManager)); - when(task.isOneInNProbability(RETRY_RANDOM_MAX_RANGE)).thenReturn(false); - TaskResult result = task.call(); - assertNull(result.getException()); - verify(recordsPublisher).shutdown(); - 
verify(shardRecordProcessor).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - verify(shardRecordProcessor, never()).leaseLost(LeaseLostInput.builder().build()); - verify(leaseRefresher).updateLeaseWithMetaInfo(Matchers.any(Lease.class), Matchers.any(UpdateField.class)); - verify(leaseRefresher, times(1)).createLeaseIfNotExists(Matchers.any(Lease.class)); - verify(task, never()).isOneInNProbability(RETRY_RANDOM_MAX_RANGE); - verify(leaseCoordinator, never()).dropLease(Matchers.any(Lease.class)); - verify(leaseCleanupManager, times(1)).enqueueForDeletion(any(LeasePendingDeletion.class)); + public void testMergeChildWhereOneParentHasLeaseAndInvalidState() throws Exception { + testMergeChildWhereOneParentHasLease(false); } + /** + * Tests the scenario when one, but not both, parent shards are accessible. + * This test should retain the lease. + */ @Test - public final void testCallTriggersLeaseLossWhenParentInfoNotPresentInLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - shardInfo = new ShardInfo("shardId-0", concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); + public void testMergeChildWhereOneParentHasLeaseAndBlockOnParent() throws Exception { + testMergeChildWhereOneParentHasLease(true); + } - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); - Lease heldLease = LeaseHelper.createLease("shardId-0", "leaseOwner", ImmutableList.of("parent1", "parent2")); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-0")).thenReturn(heldLease); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-1")) - .thenReturn(null, null, null, null, null, null, null, null, null, null, null); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(leaseCoordinator.updateLease(Matchers.any(Lease.class), Matchers.any(UUID.class), Matchers.anyString(), Matchers.anyString())).thenReturn(true); - when(leaseRefresher.getLease("shardId-0")).thenReturn(heldLease); - // Return null lease first time to simulate partial parent lease info - when(leaseRefresher.getLease("shardId-1")) - .thenReturn(null, null, null, null, null, null, null, null, null, null, null); + private void testMergeChildWhereOneParentHasLease(final boolean blockOnParent) throws Exception { + // the @Before setup makes the `SHARD_ID` parent accessible + final ChildShard mergeChild = constructChildFromMerge(); + final TaskResult result = createShutdownTaskSpy(blockOnParent, Collections.singletonList(mergeChild)) + .call(); - // Make first 10 attempts with partial parent info in lease table - for (int i = 0; i < 10; i++) { - ShutdownTask task = spy(new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, constructChildShard(), streamIdentifier, leaseCleanupManager)); - when(task.isOneInNProbability(RETRY_RANDOM_MAX_RANGE)).thenReturn(false); - TaskResult result = task.call(); + if (blockOnParent) { assertNotNull(result.getException()); - assertTrue(result.getException() instanceof BlockedOnParentShardException); - assertTrue(result.getException().getMessage().contains("has partial parent information in lease table")); + assertEquals( + BlockedOnParentShardException.class, result.getException().getClass()); + 
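+ // While still blocked on the inaccessible parent, the worker must retain the lease and keep + // the publisher running; the verifications below assert that nothing was torn down.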
+ verify(leaseCoordinator, never()).dropLease(any(Lease.class)); + verify(shardRecordProcessor, never()).leaseLost(any(LeaseLostInput.class)); verify(recordsPublisher, never()).shutdown(); - verify(shardRecordProcessor, never()) - .shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - verify(shardRecordProcessor, never()).leaseLost(LeaseLostInput.builder().build()); - verify(leaseCoordinator, never()) - .updateLease(Matchers.any(Lease.class), Matchers.any(UUID.class), Matchers.anyString(), Matchers.anyString()); - verify(leaseRefresher, never()).createLeaseIfNotExists(Matchers.any(Lease.class)); - verify(task, times(1)).isOneInNProbability(RETRY_RANDOM_MAX_RANGE); - verify(leaseCoordinator, never()).dropLease(Matchers.any(Lease.class)); - verify(leaseCleanupManager, never()).enqueueForDeletion(any(LeasePendingDeletion.class)); + } else { + assertNull(result.getException()); + + // verify that only the accessible parent was dropped + final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class); + verify(leaseCoordinator).dropLease(leaseCaptor.capture()); + assertEquals(SHARD_ID, leaseCaptor.getValue().leaseKey()); + + verify(shardRecordProcessor).leaseLost(any(LeaseLostInput.class)); + verify(recordsPublisher).shutdown(); } - // make final attempt with incomplete parent info in lease table - ShutdownTask task = spy(new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, constructChildShard(), streamIdentifier, leaseCleanupManager)); - when(task.isOneInNProbability(RETRY_RANDOM_MAX_RANGE)).thenReturn(true); - TaskResult result = task.call(); - assertNull(result.getException()); - verify(recordsPublisher).shutdown(); - verify(shardRecordProcessor, never()).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - verify(shardRecordProcessor).leaseLost(LeaseLostInput.builder().build()); - verify(leaseRefresher, never()).updateLeaseWithMetaInfo(Matchers.any(Lease.class), Matchers.any(UpdateField.class)); - verify(leaseRefresher, never()).createLeaseIfNotExists(Matchers.any(Lease.class)); - verify(task, times(1)).isOneInNProbability(RETRY_RANDOM_MAX_RANGE); - verify(leaseCoordinator).dropLease(Matchers.any(Lease.class)); + // verify that an attempt was made to retrieve both parents + final ArgumentCaptor<String> leaseKeyCaptor = ArgumentCaptor.forClass(String.class); + verify(leaseRefresher, times(mergeChild.parentShards().size())).getLease(leaseKeyCaptor.capture()); + assertEquals(mergeChild.parentShards(), leaseKeyCaptor.getAllValues()); + verify(leaseCleanupManager, never()).enqueueForDeletion(any(LeasePendingDeletion.class)); + verify(leaseRefresher, never()).updateLeaseWithMetaInfo(any(Lease.class), any(UpdateField.class)); + verify(leaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); + verify(shardRecordProcessor, never()).shardEnded(any(ShardEndedInput.class)); + } + + @Test + public final void testMergeChildWhereBothParentsHaveLeases() throws Exception { + // the @Before test setup makes the `SHARD_ID` parent accessible + final ChildShard mergeChild = constructChildFromMerge(); + // make second parent accessible + setupLease(mergeChild.parentShards().get(1), Collections.emptyList()); + + final Lease mockChildLease = mock(Lease.class); 
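+ // Stub the lease that the hierarchical shard syncer creates for the merge child so that + // shard-end processing below has a lease to persist via createLeaseIfNotExists.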
+ when(hierarchicalShardSyncer.createLeaseForChildShard(mergeChild, STREAM_IDENTIFIER)) + .thenReturn(mockChildLease); + + final TaskResult result = createShutdownTask(SHARD_END, Collections.singletonList(mergeChild)) + .call(); + + assertNull(result.getException()); + verify(leaseCleanupManager).enqueueForDeletion(any(LeasePendingDeletion.class)); + + final ArgumentCaptor<Lease> updateLeaseCaptor = ArgumentCaptor.forClass(Lease.class); + verify(leaseRefresher).updateLeaseWithMetaInfo(updateLeaseCaptor.capture(), eq(UpdateField.CHILD_SHARDS)); + final Lease updatedLease = updateLeaseCaptor.getValue(); + assertEquals(SHARD_ID, updatedLease.leaseKey()); + assertEquals(Collections.singleton(mergeChild.shardId()), updatedLease.childShardIds()); + + verify(leaseRefresher).createLeaseIfNotExists(mockChildLease); + + // verify all parent+child leases were retrieved + final Set<String> expectedShardIds = new HashSet<>(mergeChild.parentShards()); + expectedShardIds.add(mergeChild.shardId()); + final ArgumentCaptor<String> leaseKeyCaptor = ArgumentCaptor.forClass(String.class); + verify(leaseRefresher, atLeast(expectedShardIds.size())).getLease(leaseKeyCaptor.capture()); + assertEquals(expectedShardIds, new HashSet<>(leaseKeyCaptor.getAllValues())); + + verifyShutdownAndNoDrop(); + verify(shardRecordProcessor) + .shardEnded(ShardEndedInput.builder() + .checkpointer(recordProcessorCheckpointer) + .build()); } /** @@ -319,25 +293,16 @@ public class ShutdownTaskTest { */ @Test public final void testCallWhenShardNotFound() throws Exception { - final Lease heldLease = LeaseHelper.createLease("shardId-4", "leaseOwner", Collections.emptyList()); - shardInfo = new ShardInfo("shardId-4", concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); - task = new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - SHARD_END_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, new ArrayList<>(), streamIdentifier, leaseCleanupManager); + final Lease lease = setupLease("shardId-4", Collections.emptyList()); + final ShardInfo shardInfo = new ShardInfo( + lease.leaseKey(), "concurrencyToken", Collections.emptySet(), ExtendedSequenceNumber.LATEST); - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(leaseRefresher.getLease("shardId-4")).thenReturn(heldLease); - when(leaseCoordinator.getCurrentlyHeldLease("shardId-4")).thenReturn(heldLease); + final TaskResult result = createShutdownTask(SHARD_END, Collections.emptyList(), shardInfo) + .call(); - final TaskResult result = task.call(); assertNull(result.getException()); - verify(recordsPublisher).shutdown(); - verify(shardRecordProcessor, never()).leaseLost(LeaseLostInput.builder().build()); - verify(leaseRefresher, never()).createLeaseIfNotExists(Matchers.any(Lease.class)); - verify(leaseCoordinator, never()).dropLease(Matchers.any(Lease.class)); + verifyShutdownAndNoDrop(); + verify(leaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); } /** @@ -345,23 +310,42 @@ public class ShutdownTaskTest { * This test is for the scenario that a ShutdownTask is created for the ShardConsumer losing the lease. 
*/ @Test - public final void testCallWhenLeaseLost() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - shardInfo = new ShardInfo("shardId-4", concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); - task = new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - LEASE_LOST_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY, new ArrayList<>(), streamIdentifier, leaseCleanupManager); + public final void testCallWhenLeaseLost() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + final TaskResult result = + createShutdownTask(LEASE_LOST, Collections.emptyList()).call(); - final TaskResult result = task.call(); assertNull(result.getException()); verify(recordsPublisher).shutdown(); - verify(shardRecordProcessor, never()).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); + verify(shardRecordProcessor, never()) + .shardEnded(ShardEndedInput.builder() + .checkpointer(recordProcessorCheckpointer) + .build()); verify(shardRecordProcessor).leaseLost(LeaseLostInput.builder().build()); verify(leaseCoordinator, never()).getAssignments(); verify(leaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); verify(leaseCoordinator, never()).dropLease(any(Lease.class)); } + + @Test + public void testNullChildShards() throws Exception { + final TaskResult result = createShutdownTask(SHARD_END, null).call(); + + assertNull(result.getException()); + verifyShutdownAndNoDrop(); + verify(leaseCleanupManager).enqueueForDeletion(any(LeasePendingDeletion.class)); + verify(leaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); + } + + @Test + public void testCallWhenShutdownNotificationIsNull() { + final TaskResult result = + createShutdownTask(LEASE_LOST, Collections.emptyList()).call(); + assertNull(result.getException()); + verify(recordsPublisher).shutdown(); + verify(shutdownNotification, never()).shutdownComplete(); + } + /** * Test method for {@link ShutdownTask#taskType()}. 
*/ @@ -370,36 +354,77 @@ public class ShutdownTaskTest { assertEquals(TaskType.SHUTDOWN, task.taskType()); } - private List<ChildShard> constructChildShards() { - List<ChildShard> childShards = new ArrayList<>(); - List<String> parentShards = new ArrayList<>(); - parentShards.add(shardId); - ChildShard leftChild = ChildShard.builder() - .shardId("ShardId-1") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); - ChildShard rightChild = ChildShard.builder() - .shardId("ShardId-2") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); - childShards.add(leftChild); - childShards.add(rightChild); - return childShards; + private void verifyShutdownAndNoDrop() { + verify(recordsPublisher).shutdown(); + verify(leaseCoordinator, never()).dropLease(any(Lease.class)); + verify(shardRecordProcessor, never()).leaseLost(any(LeaseLostInput.class)); } - private List<ChildShard> constructChildShard() { - List<ChildShard> childShards = new ArrayList<>(); - List<String> parentShards = new ArrayList<>(); - parentShards.add(shardId); - parentShards.add("shardId-1"); + private Lease setupLease(final String leaseKey, final Collection<String> parentShardIds) throws Exception { + final Lease lease = LeaseHelper.createLease(leaseKey, "leaseOwner", parentShardIds); + when(leaseCoordinator.getCurrentlyHeldLease(lease.leaseKey())).thenReturn(lease); + when(leaseRefresher.getLease(lease.leaseKey())).thenReturn(lease); + return lease; + } + + /** + * Constructs two {@link ChildShard}s that mimic a shard split operation. + */ + private List<ChildShard> constructChildrenFromSplit() { + List<String> parentShards = Collections.singletonList(SHARD_ID); ChildShard leftChild = ChildShard.builder() + .shardId("ShardId-1") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + .build(); + ChildShard rightChild = ChildShard.builder() + .shardId("ShardId-2") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); + return Arrays.asList(leftChild, rightChild); + } + + /** + * Constructs a {@link ChildShard} that mimics a shard merge operation. 
+ */ + private ChildShard constructChildFromMerge() { + List<String> parentShards = Arrays.asList(SHARD_ID, "shardId-1"); + return ChildShard.builder() .shardId("shardId-2") .parentShards(parentShards) .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) .build(); - childShards.add(leftChild); - return childShards; + } + + private ShutdownTask createShutdownTaskSpy(final boolean blockOnParent, final List<ChildShard> childShards) { + final ShutdownTask spy = spy(createShutdownTask(SHARD_END, childShards)); + when(spy.isOneInNProbability(ShutdownTask.RETRY_RANDOM_MAX_RANGE)).thenReturn(!blockOnParent); + return spy; + } + + private ShutdownTask createShutdownTask(final ShutdownReason reason, final List<ChildShard> childShards) { + return createShutdownTask(reason, childShards, SHARD_INFO); + } + + private ShutdownTask createShutdownTask( + final ShutdownReason reason, final List<ChildShard> childShards, final ShardInfo shardInfo) { + return new ShutdownTask( + shardInfo, + shardDetector, + shardRecordProcessor, + recordProcessorCheckpointer, + reason, + INITIAL_POSITION_TRIM_HORIZON, + false, + false, + leaseCoordinator, + TASK_BACKOFF_TIME_MILLIS, + recordsPublisher, + hierarchicalShardSyncer, + NULL_METRICS_FACTORY, + childShards, + STREAM_IDENTIFIER, + leaseCleanupManager); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java index 578ed98d..a1782cf7 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java @@ -16,11 +16,9 @@ package software.amazon.kinesis.metrics; import org.junit.Assert; import org.junit.Test; - import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class AccumulatingMetricsScopeTest { private static class TestScope extends AccumulateByNameMetricsScope { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java index 7f40266b..a1676aaf 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java @@ -28,7 +28,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; @@ -63,7 +62,7 @@ public class CloudWatchMetricsPublisherTest { List<MetricDatumWithKey<CloudWatchMetricKey>> dataToPublish = constructMetricDatumWithKeyList(25); List<List<MetricDatumWithKey<CloudWatchMetricKey>>> expectedData = constructMetricDatumListMap(dataToPublish); publisher.publishMetrics(dataToPublish); - + ArgumentCaptor<PutMetricDataRequest> argument = ArgumentCaptor.forClass(PutMetricDataRequest.class); Mockito.verify(cloudWatchClient, Mockito.atLeastOnce()).putMetricData(argument.capture()); @@ -73,7 +72,6 @@ public class CloudWatchMetricsPublisherTest { for (int i = 0; i < requests.size(); i++) { assertMetricData(expectedData.get(i), requests.get(i)); } - } public static List<MetricDatumWithKey<CloudWatchMetricKey>> 
constructMetricDatumWithKeyList(int value) { @@ -89,7 +87,8 @@ public class CloudWatchMetricsPublisherTest { // batchSize is the number of metrics sent in a single request. // In CloudWatchMetricsPublisher this number is set to 20. - public List<List<MetricDatumWithKey<CloudWatchMetricKey>>> constructMetricDatumListMap(List<MetricDatumWithKey<CloudWatchMetricKey>> data) { + public List<List<MetricDatumWithKey<CloudWatchMetricKey>>> constructMetricDatumListMap( + List<MetricDatumWithKey<CloudWatchMetricKey>> data) { int batchSize = 20; List<List<MetricDatumWithKey<CloudWatchMetricKey>>> dataList = new ArrayList<List<MetricDatumWithKey<CloudWatchMetricKey>>>(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java index a6c29fb8..4596835d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java @@ -24,7 +24,6 @@ import org.mockito.Mockito; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class CloudWatchPublisherRunnableTest { private static final int MAX_QUEUE_SIZE = 5; @@ -36,7 +35,8 @@ public class CloudWatchPublisherRunnableTest { private static final int FLUSH_SIZE = 2; private static class TestHarness { - private List<MetricDatumWithKey<CloudWatchMetricKey>> data = new ArrayList<MetricDatumWithKey<CloudWatchMetricKey>>(); + private List<MetricDatumWithKey<CloudWatchMetricKey>> data = + new ArrayList<MetricDatumWithKey<CloudWatchMetricKey>>(); private int counter = 0; private CloudWatchMetricsPublisher publisher; private CloudWatchPublisherRunnable runnable; @@ -44,16 +44,12 @@ public class CloudWatchPublisherRunnableTest { TestHarness() { publisher = Mockito.mock(CloudWatchMetricsPublisher.class); - runnable = new CloudWatchPublisherRunnable(publisher, - MAX_BUFFER_TIME_MILLIS, - MAX_QUEUE_SIZE, - FLUSH_SIZE) { + runnable = new CloudWatchPublisherRunnable(publisher, MAX_BUFFER_TIME_MILLIS, MAX_QUEUE_SIZE, FLUSH_SIZE) { @Override protected long getTime() { return time; } - }; } @@ -67,12 +63,8 @@ public class CloudWatchPublisherRunnableTest { } private MetricDatumWithKey<CloudWatchMetricKey> constructDatum(int value) { - MetricDatum datum = TestHelper.constructDatum("datum-" + Integer.toString(value), - StandardUnit.COUNT, - value, - value, - value, - 1); + MetricDatum datum = TestHelper.constructDatum( + "datum-" + Integer.toString(value), StandardUnit.COUNT, value, value, value, 1); return new MetricDatumWithKey<CloudWatchMetricKey>(new CloudWatchMetricKey(datum), datum); } @@ -80,7 +72,7 @@ /** * Run one iteration of the runnable and assert that it called CloudWatch with count records beginning with * record startIndex, and no more than that. 
- * + * * @param startIndex * @param count */ diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java index 2a32764d..a4572de0 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java @@ -15,15 +15,11 @@ package software.amazon.kinesis.metrics; import org.junit.Test; - import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.kinesis.metrics.EndingMetricsScope; public class EndingMetricsScopeTest { - private static class TestScope extends EndingMetricsScope { - - } + private static class TestScope extends EndingMetricsScope {} @Test public void testAddDataNotEnded() { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java index 5320588c..4478c07d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java @@ -16,13 +16,11 @@ package software.amazon.kinesis.metrics; import java.util.Set; +import com.google.common.collect.ImmutableSet; import lombok.AccessLevel; import lombok.NoArgsConstructor; import org.junit.Assert; import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - import software.amazon.awssdk.services.cloudwatch.model.Dimension; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; @@ -49,7 +47,9 @@ public class FilteringMetricsScopeTest { Assert.assertTrue(getDimensions().remove(dimension)); } - Assert.assertTrue("Dimensions should be empty at the end of assertDimensions", getDimensions().isEmpty()); + Assert.assertTrue( + "Dimensions should be empty at the end of assertDimensions", + getDimensions().isEmpty()); } } @@ -106,8 +106,9 @@ public class FilteringMetricsScopeTest { @Test public void testMetricsDimensionsAll() { - TestScope scope = new TestScope(MetricsLevel.DETAILED, ImmutableSet.of( - "ThisDoesNotMatter", MetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter")); + TestScope scope = new TestScope( + MetricsLevel.DETAILED, + ImmutableSet.of("ThisDoesNotMatter", MetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter")); scope.addDimension("ShardId", "shard-0001"); scope.addDimension("Operation", "ProcessRecords"); scope.addDimension("ShardId", "shard-0001"); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java index 18bba742..4472776f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java @@ -21,12 +21,10 @@ import java.util.List; import org.junit.Assert; import org.junit.Before; import org.junit.Test; - import software.amazon.awssdk.services.cloudwatch.model.Dimension; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import 
software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class MetricAccumulatingQueueTest { private static final int MAX_QUEUE_SIZE = 5; @@ -40,48 +38,58 @@ public class MetricAccumulatingQueueTest { private Dimension dim(String name, String value) { return Dimension.builder().name(name).value(value).build(); } - + /* - * Test whether the MetricDatums offered into the queue will accumulate data based on the same metricName and + * Test whether the MetricDatums offered into the queue will accumulate data based on the same metricName and * output those datums with the correctly accumulated output. */ @Test public void testAccumulation() { - Collection<Dimension> dimensionsA = Collections.singleton(dim("name","a")); - Collection<Dimension> dimensionsB = Collections.singleton(dim("name","b")); + Collection<Dimension> dimensionsA = Collections.singleton(dim("name", "a")); + Collection<Dimension> dimensionsB = Collections.singleton(dim("name", "b")); String keyA = "a"; String keyB = "b"; - MetricDatum datum1 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 5, 15, 2).toBuilder().dimensions(dimensionsA).build(); + MetricDatum datum1 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 5, 15, 2).toBuilder() + .dimensions(dimensionsA) + .build(); queue.offer(new CloudWatchMetricKey(datum1), datum1); - MetricDatum datum2 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder().dimensions(dimensionsA).build(); + MetricDatum datum2 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder() + .dimensions(dimensionsA) + .build(); queue.offer(new CloudWatchMetricKey(datum2), datum2); - MetricDatum datum3 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder().dimensions(dimensionsB).build(); + MetricDatum datum3 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder() + .dimensions(dimensionsB) + .build(); queue.offer(new CloudWatchMetricKey(datum3), datum3); MetricDatum datum4 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2); queue.offer(new CloudWatchMetricKey(datum4), datum4); queue.offer(new CloudWatchMetricKey(datum4), datum4); - MetricDatum datum5 = - TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2).toBuilder().dimensions(dimensionsA).build(); + MetricDatum datum5 = TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2).toBuilder() + .dimensions(dimensionsA) + .build(); queue.offer(new CloudWatchMetricKey(datum5), datum5); Assert.assertEquals(4, queue.size()); List<MetricDatumWithKey<CloudWatchMetricKey>> items = queue.drain(4); - Assert.assertEquals(items.get(0).datum, TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 1, 17, 4) - .toBuilder().dimensions(dimensionsA).build()); + Assert.assertEquals( + items.get(0).datum, + TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 1, 17, 4).toBuilder() + .dimensions(dimensionsA) + .build()); Assert.assertEquals(items.get(1).datum, datum3); Assert.assertEquals(items.get(2).datum, TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 4, 4)); - Assert.assertEquals(items.get(3).datum, TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2) - .toBuilder().dimensions(dimensionsA).build()); + Assert.assertEquals( + items.get(3).datum, + TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2).toBuilder() + .dimensions(dimensionsA) + .build()); } - + /* * Test that the number of MetricDatum that can be added to our queue is capped at the MAX_QUEUE_SIZE. 
* Therefore, any datums added to the queue that is greater than the capacity of our queue will be dropped. diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java index 5d6c2389..e358d5ac 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java @@ -14,25 +14,24 @@ */ package software.amazon.kinesis.metrics; - import software.amazon.awssdk.services.cloudwatch.model.Dimension; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; public class TestHelper { - public static MetricDatum constructDatum(String name, - StandardUnit unit, - double maximum, - double minimum, - double sum, - double count) { - return MetricDatum.builder().metricName(name) + public static MetricDatum constructDatum( + String name, StandardUnit unit, double maximum, double minimum, double sum, double count) { + return MetricDatum.builder() + .metricName(name) .unit(unit) - .statisticValues(StatisticSet.builder().maximum(maximum) + .statisticValues(StatisticSet.builder() + .maximum(maximum) .minimum(minimum) .sum(sum) - .sampleCount(count).build()).build(); + .sampleCount(count) + .build()) + .build(); } public static Dimension constructDimension(String name, String value) { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java index 9ae19ba3..1ac8822f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java @@ -15,11 +15,6 @@ package software.amazon.kinesis.processor; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; - import org.hamcrest.Matchers; import org.junit.Test; import software.amazon.kinesis.common.InitialPositionInStream; @@ -27,6 +22,11 @@ import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertThat; + public class SingleStreamTrackerTest { private static final String STREAM_NAME = SingleStreamTrackerTest.class.getSimpleName(); @@ -43,8 +43,8 @@ public class SingleStreamTrackerTest { InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); assertNotEquals(expectedPosition, StreamTracker.DEFAULT_POSITION_IN_STREAM); - final StreamTracker tracker = new SingleStreamTracker( - StreamIdentifier.singleStreamInstance(STREAM_NAME), expectedPosition); + final StreamTracker tracker = + new SingleStreamTracker(StreamIdentifier.singleStreamInstance(STREAM_NAME), expectedPosition); validate(tracker, expectedPosition); } @@ -55,12 +55,12 @@ public class SingleStreamTrackerTest { private static void validate(StreamTracker tracker, 
InitialPositionInStreamExtended expectedPosition) { assertEquals(1, tracker.streamConfigList().size()); assertFalse(tracker.isMultiStream()); - assertThat(tracker.formerStreamsLeasesDeletionStrategy(), + assertThat( + tracker.formerStreamsLeasesDeletionStrategy(), Matchers.instanceOf(FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy.class)); final StreamConfig config = tracker.streamConfigList().get(0); assertEquals(STREAM_NAME, config.streamIdentifier().streamName()); assertEquals(expectedPosition, config.initialPositionInStreamExtended()); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java index 8319a0ac..c90f108b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java @@ -15,10 +15,9 @@ package software.amazon.kinesis.retrieval; -import org.junit.Test; - import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import org.junit.Test; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.isA; @@ -27,31 +26,28 @@ import static org.junit.Assert.assertThat; @Slf4j public class AWSExceptionManagerTest { + private static final String EXPECTED_HANDLING_MARKER = AWSExceptionManagerTest.class.getSimpleName(); + + private final AWSExceptionManager manager = new AWSExceptionManager(); + @Test public void testSpecificException() { - AWSExceptionManager manager = new AWSExceptionManager(); - final String EXPECTED_HANDLING_MARKER = "Handled-TestException"; - manager.add(TestException.class, t -> { log.info("Handling test exception: {} -> {}", t.getMessage(), t.getAdditionalMessage()); return new RuntimeException(EXPECTED_HANDLING_MARKER, t); }); - TestException te = new TestException("Main Mesage", "Sub Message"); - + TestException te = new TestException("Main Message", "Sub Message"); RuntimeException converted = manager.apply(te); assertThat(converted, isA(RuntimeException.class)); assertThat(converted.getMessage(), equalTo(EXPECTED_HANDLING_MARKER)); assertThat(converted.getCause(), equalTo(te)); - } @Test public void testParentException() { - AWSExceptionManager manager = new AWSExceptionManager(); - final String EXPECTED_HANDLING_MARKER = "Handled-IllegalStateException"; manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i)); manager.add(Exception.class, i -> new RuntimeException("RawException", i)); manager.add(IllegalStateException.class, i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i)); @@ -66,8 +62,7 @@ public class AWSExceptionManagerTest { @Test public void testDefaultHandler() { - final String EXPECTED_HANDLING_MARKER = "Handled-Default"; - AWSExceptionManager manager = new AWSExceptionManager().defaultFunction(i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i)); + manager.defaultFunction(i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i)); manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i)); manager.add(Exception.class, i -> new RuntimeException("RawException", i)); @@ -83,8 +78,6 @@ public class AWSExceptionManagerTest { @Test public void testIdHandler() { - AWSExceptionManager manager = new AWSExceptionManager(); - manager.add(IllegalArgumentException.class, i -> new 
RuntimeException("IllegalArgument", i)); manager.add(Exception.class, i -> new RuntimeException("RawException", i)); manager.add(IllegalStateException.class, i -> i); @@ -106,5 +99,4 @@ public class AWSExceptionManagerTest { this.additionalMessage = additionalMessage; } } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java index db28261e..458792af 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java @@ -1,8 +1,5 @@ package software.amazon.kinesis.retrieval; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; - import java.time.Instant; import java.util.Date; import java.util.function.Consumer; @@ -10,7 +7,6 @@ import java.util.function.Function; import java.util.function.Supplier; import org.junit.Test; - import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; @@ -18,6 +14,9 @@ import software.amazon.kinesis.checkpoint.SentinelCheckpoint; import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + public class IteratorBuilderTest { private static final String SHARD_ID = "Shard-001"; @@ -53,7 +52,11 @@ public class IteratorBuilderTest { @Test public void subscribeReconnectTest() { - sequenceNumber(this::stsBase, this::verifyStsBase, IteratorBuilder::reconnectRequest, WrappedRequest::wrapped, + sequenceNumber( + this::stsBase, + this::verifyStsBase, + IteratorBuilder::reconnectRequest, + WrappedRequest::wrapped, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } @@ -64,7 +67,11 @@ public class IteratorBuilderTest { @Test public void getShardIteratorReconnectTest() { - sequenceNumber(this::gsiBase, this::verifyGsiBase, IteratorBuilder::reconnectRequest, WrappedRequest::wrapped, + sequenceNumber( + this::gsiBase, + this::verifyGsiBase, + IteratorBuilder::reconnectRequest, + WrappedRequest::wrapped, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } @@ -78,55 +85,108 @@ public class IteratorBuilderTest { timeStampTest(this::gsiBase, this::verifyGsiBase, IteratorBuilder::request, WrappedRequest::wrapped); } - private interface IteratorApply { T apply(T base, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended); } - private void latestTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, + private void latestTest( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, Function> toRequest) { String sequenceNumber = SentinelCheckpoint.LATEST.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.LATEST, null, null); + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + updateTest( + supplier, + 
baseVerifier, + iteratorRequest, + toRequest, + sequenceNumber, + initialPosition, + ShardIteratorType.LATEST, + null, + null); } - private <T> void trimHorizonTest(Supplier<T> supplier, Consumer<T> baseVerifier, - IteratorApply<T> iteratorRequest, Function<T, WrappedRequest<T>> toRequest) { + private <T> void trimHorizonTest( + Supplier<T> supplier, + Consumer<T> baseVerifier, + IteratorApply<T> iteratorRequest, + Function<T, WrappedRequest<T>> toRequest) { String sequenceNumber = SentinelCheckpoint.TRIM_HORIZON.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.TRIM_HORIZON, null, null); + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + sequenceNumber, + initialPosition, + ShardIteratorType.TRIM_HORIZON, + null, + null); } - private <T> void sequenceNumber(Supplier<T> supplier, Consumer<T> baseVerifier, IteratorApply<T> iteratorRequest, + private <T> void sequenceNumber( + Supplier<T> supplier, + Consumer<T> baseVerifier, + IteratorApply<T> iteratorRequest, Function<T, WrappedRequest<T>> toRequest) { sequenceNumber(supplier, baseVerifier, iteratorRequest, toRequest, ShardIteratorType.AT_SEQUENCE_NUMBER); } - private <T> void sequenceNumber(Supplier<T> supplier, Consumer<T> baseVerifier, IteratorApply<T> iteratorRequest, - Function<T, WrappedRequest<T>> toRequest, ShardIteratorType shardIteratorType) { - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, SEQUENCE_NUMBER, initialPosition, - shardIteratorType, "1234", null); + private <T> void sequenceNumber( + Supplier<T> supplier, + Consumer<T> baseVerifier, + IteratorApply<T> iteratorRequest, + Function<T, WrappedRequest<T>> toRequest, + ShardIteratorType shardIteratorType) { + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + SEQUENCE_NUMBER, + initialPosition, + shardIteratorType, + "1234", + null); } - private <T> void timeStampTest(Supplier<T> supplier, Consumer<T> baseVerifier, IteratorApply<T> iteratorRequest, + private <T> void timeStampTest( + Supplier<T> supplier, + Consumer<T> baseVerifier, + IteratorApply<T> iteratorRequest, Function<T, WrappedRequest<T>> toRequest) { String sequenceNumber = SentinelCheckpoint.AT_TIMESTAMP.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(TIMESTAMP.toEpochMilli())); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.AT_TIMESTAMP, null, TIMESTAMP); + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(TIMESTAMP.toEpochMilli())); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + sequenceNumber, + initialPosition, + ShardIteratorType.AT_TIMESTAMP, + null, + TIMESTAMP); } - private <T> void updateTest(Supplier<T> supplier, Consumer<T> baseVerifier, IteratorApply<T> iteratorRequest, - Function<T, WrappedRequest<T>> toRequest, String sequenceNumber, - InitialPositionInStreamExtended initialPositionInStream, ShardIteratorType expectedShardIteratorType, - String expectedSequenceNumber, Instant expectedTimestamp) { + 
private void updateTest( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, + Function> toRequest, + String sequenceNumber, + InitialPositionInStreamExtended initialPositionInStream, + ShardIteratorType expectedShardIteratorType, + String expectedSequenceNumber, + Instant expectedTimestamp) { T base = supplier.get(); T updated = iteratorRequest.apply(base, sequenceNumber, initialPositionInStream); WrappedRequest request = toRequest.apply(updated); @@ -134,7 +194,6 @@ public class IteratorBuilderTest { assertThat(request.shardIteratorType(), equalTo(expectedShardIteratorType)); assertThat(request.sequenceNumber(), equalTo(expectedSequenceNumber)); assertThat(request.timestamp(), equalTo(expectedTimestamp)); - } private interface WrappedRequest { @@ -214,5 +273,4 @@ public class IteratorBuilderTest { private GetShardIteratorRequest.Builder gsiBase() { return GetShardIteratorRequest.builder().shardId(SHARD_ID).streamName(STREAM_NAME); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java index 041ac71e..31ede003 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java @@ -3,16 +3,14 @@ package software.amazon.kinesis.retrieval; import java.util.Arrays; import java.util.Optional; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.mockito.Mockito.mock; -import static software.amazon.kinesis.common.InitialPositionInStream.LATEST; -import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; - +import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.MockitoAnnotations; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamConfig; @@ -20,6 +18,16 @@ import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.common.InitialPositionInStream.LATEST; +import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; + +@RunWith(MockitoJUnitRunner.class) public class RetrievalConfigTest { private static final String APPLICATION_NAME = RetrievalConfigTest.class.getSimpleName(); @@ -27,29 +35,45 @@ public class RetrievalConfigTest { @Mock private KinesisAsyncClient mockKinesisClient; + @Mock + private MultiStreamTracker mockMultiStreamTracker; + @Before public void setUp() { - MockitoAnnotations.initMocks(this); + when(mockMultiStreamTracker.isMultiStream()).thenReturn(true); } @Test - public void testTrackerConstruction() { + public void testSingleStreamTrackerConstruction() { final String streamName = 
"single-stream"; - final RetrievalConfig configByName = createConfig(streamName); - final SingleStreamTracker singleTracker = new SingleStreamTracker(streamName); - final RetrievalConfig configBySingleTracker = createConfig(singleTracker); + final Arn streamArn = createArn(streamName); - for (final RetrievalConfig rc : Arrays.asList(configByName, configBySingleTracker)) { + for (final RetrievalConfig rc : Arrays.asList( + createConfig(streamName), + createConfig(new SingleStreamTracker(streamName)), + createConfig(streamArn), + createConfig(new SingleStreamTracker(streamArn)))) { assertEquals(Optional.empty(), rc.appStreamTracker().left()); - assertEquals(singleTracker, rc.streamTracker()); + assertEquals( + streamName, + rc.streamTracker() + .streamConfigList() + .get(0) + .streamIdentifier() + .streamName()); assertEquals(1, rc.streamTracker().streamConfigList().size()); assertFalse(rc.streamTracker().isMultiStream()); } + } + @Test + public void testMultiStreamTrackerConstruction() { final StreamTracker mockMultiStreamTracker = mock(MultiStreamTracker.class); final RetrievalConfig configByMultiTracker = createConfig(mockMultiStreamTracker); assertEquals(Optional.empty(), configByMultiTracker.appStreamTracker().right()); - assertEquals(mockMultiStreamTracker, configByMultiTracker.appStreamTracker().left().get()); + assertEquals( + mockMultiStreamTracker, + configByMultiTracker.appStreamTracker().left().get()); assertEquals(mockMultiStreamTracker, configByMultiTracker.streamTracker()); } @@ -60,8 +84,7 @@ public class RetrievalConfigTest { for (final StreamConfig sc : config.streamTracker().streamConfigList()) { assertEquals(LATEST, sc.initialPositionInStreamExtended().getInitialPositionInStream()); } - config.initialPositionInStreamExtended( - InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); + config.initialPositionInStreamExtended(InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); for (final StreamConfig sc : config.streamTracker().streamConfigList()) { assertEquals(TRIM_HORIZON, sc.initialPositionInStreamExtended().getInitialPositionInStream()); } @@ -69,17 +92,52 @@ public class RetrievalConfigTest { @Test(expected = IllegalArgumentException.class) public void testUpdateInitialPositionInMultiStream() { - final RetrievalConfig config = createConfig(mock(MultiStreamTracker.class)); - config.initialPositionInStreamExtended( - InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); + createConfig(mockMultiStreamTracker) + .initialPositionInStreamExtended(InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); + } + + /** + * Test that an invalid {@link RetrievalSpecificConfig} does not overwrite + * a valid one. 
+ */ + @Test + public void testInvalidRetrievalSpecificConfig() { + final RetrievalSpecificConfig validConfig = mock(RetrievalSpecificConfig.class); + final RetrievalSpecificConfig invalidConfig = mock(RetrievalSpecificConfig.class); + doThrow(new IllegalArgumentException("womp womp")).when(invalidConfig).validateState(true); + + final RetrievalConfig config = createConfig(mockMultiStreamTracker); + assertNull(config.retrievalSpecificConfig()); + config.retrievalSpecificConfig(validConfig); + assertEquals(validConfig, config.retrievalSpecificConfig()); + + try { + config.retrievalSpecificConfig(invalidConfig); + Assert.fail("should throw"); + } catch (RuntimeException re) { + assertEquals(validConfig, config.retrievalSpecificConfig()); + } } private RetrievalConfig createConfig(String streamName) { return new RetrievalConfig(mockKinesisClient, streamName, APPLICATION_NAME); } + private RetrievalConfig createConfig(Arn streamArn) { + return new RetrievalConfig(mockKinesisClient, streamArn, APPLICATION_NAME); + } + private RetrievalConfig createConfig(StreamTracker streamTracker) { return new RetrievalConfig(mockKinesisClient, streamTracker, APPLICATION_NAME); } -} \ No newline at end of file + private static Arn createArn(String streamName) { + return Arn.builder() + .partition("aws") + .service("kinesis") + .region(Region.US_EAST_1.id()) + .accountId("123456789012") + .resource("stream/" + streamName) + .build(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java index eec5ea9e..29162805 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java @@ -14,17 +14,16 @@ */ package software.amazon.kinesis.retrieval; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import org.slf4j.Logger; -import software.amazon.kinesis.retrieval.ThrottlingReporter; + +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; @RunWith(MockitoJUnitRunner.class) public class ThrottlingReporterTest { @@ -40,7 +39,6 @@ public class ThrottlingReporterTest { reporter.throttled(); verify(throttleLog).warn(anyString()); verify(throttleLog, never()).error(anyString()); - } @Test @@ -63,7 +61,6 @@ public class ThrottlingReporterTest { reporter.throttled(); verify(throttleLog, times(2)).warn(anyString()); verify(throttleLog, times(3)).error(anyString()); - } private class LogTestingThrottingReporter extends ThrottlingReporter { @@ -77,5 +74,4 @@ public class ThrottlingReporterTest { return throttleLog; } } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java index 4fee3d08..6684a22a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java +++ 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java @@ -15,30 +15,34 @@ package software.amazon.kinesis.retrieval.fanout; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import java.util.Optional; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.common.StreamConfig; +import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.retrieval.RetrievalFactory; -import java.util.Optional; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class FanOutConfigTest { @@ -50,137 +54,163 @@ public class FanOutConfigTest { @Mock private FanOutConsumerRegistration consumerRegistration; + @Mock private KinesisAsyncClient kinesisClient; + @Mock private StreamConfig streamConfig; + @Mock + private StreamIdentifier streamIdentifier; + + private FanOutConfig config; + @Before public void setup() { - when(streamConfig.consumerArn()).thenReturn(null); + config = spy(new FanOutConfig(kinesisClient)) + // DRY: set the most commonly-used parameters + .applicationName(TEST_APPLICATION_NAME) + .streamName(TEST_STREAM_NAME); + doReturn(consumerRegistration) + .when(config) + .createConsumerRegistration(eq(kinesisClient), anyString(), anyString()); + when(streamConfig.streamIdentifier()).thenReturn(streamIdentifier); + when(streamIdentifier.streamName()).thenReturn(TEST_STREAM_NAME); } @Test - public void testNoRegisterIfConsumerArnSet() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).consumerArn(TEST_CONSUMER_ARN); + public void testNoRegisterIfConsumerArnSet() { + config.consumerArn(TEST_CONSUMER_ARN) + // unset common parameters + .applicationName(null) + .streamName(null); + RetrievalFactory retrievalFactory = config.retrievalFactory(); - assertThat(retrievalFactory, not(nullValue())); - verify(consumerRegistration, never()).getOrCreateStreamConsumerArn(); + assertNotNull(retrievalFactory); + verifyZeroInteractions(consumerRegistration); } @Test public void testRegisterCalledWhenConsumerArnUnset() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory retrievalFactory = config.retrievalFactory(); - ShardInfo shardInfo = mock(ShardInfo.class); 
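The rewritten setup() above swaps the old TestingConfig subclass for a Mockito spy. A minimal, self-contained sketch of that pattern (Config and Registration below are hypothetical stand-ins): doReturn(..).when(spy) is the safe form for spies, because when(spy.method(..)) would invoke the real method before the stub takes effect.

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;

    final class SpyStubSketch {
        static class Registration {}

        static class Config {
            protected Registration createRegistration(String app, String stream) {
                throw new IllegalStateException("would call AWS in production");
            }
        }

        static Config stubbedConfig(Registration canned) {
            Config config = spy(new Config());
            // The real factory method is never invoked; the spy returns the canned value.
            doReturn(canned).when(config).createRegistration(anyString(), anyString());
            return config;
        }
    }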
-// doReturn(Optional.of(StreamIdentifier.singleStreamInstance(TEST_STREAM_NAME).serialize())).when(shardInfo).streamIdentifier(); - doReturn(Optional.empty()).when(shardInfo).streamIdentifierSerOpt(); - retrievalFactory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); - assertThat(retrievalFactory, not(nullValue())); + getRecordsCache(null); + verify(consumerRegistration).getOrCreateStreamConsumerArn(); } @Test public void testRegisterNotCalledWhenConsumerArnSetInMultiStreamMode() throws Exception { when(streamConfig.consumerArn()).thenReturn("consumerArn"); - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory retrievalFactory = config.retrievalFactory(); - ShardInfo shardInfo = mock(ShardInfo.class); - doReturn(Optional.of("account:stream:12345")).when(shardInfo).streamIdentifierSerOpt(); - retrievalFactory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); - assertThat(retrievalFactory, not(nullValue())); + + getRecordsCache("123456789012:stream:12345"); + verify(consumerRegistration, never()).getOrCreateStreamConsumerArn(); } @Test public void testRegisterCalledWhenConsumerArnNotSetInMultiStreamMode() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory retrievalFactory = config.retrievalFactory(); - ShardInfo shardInfo = mock(ShardInfo.class); - doReturn(Optional.of("account:stream:12345")).when(shardInfo).streamIdentifierSerOpt(); - retrievalFactory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); - assertThat(retrievalFactory, not(nullValue())); + getRecordsCache("123456789012:stream:12345"); + verify(consumerRegistration).getOrCreateStreamConsumerArn(); } @Test public void testDependencyExceptionInConsumerCreation() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); DependencyException de = new DependencyException("Bad", null); when(consumerRegistration.getOrCreateStreamConsumerArn()).thenThrow(de); + try { - config.retrievalFactory(); + getRecordsCache(null); + Assert.fail("should throw"); } catch (RuntimeException e) { verify(consumerRegistration).getOrCreateStreamConsumerArn(); - assertThat(e.getCause(), equalTo(de)); + assertEquals(de, e.getCause()); } } @Test - public void testCreationWithApplicationName() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory factory = config.retrievalFactory(); - ShardInfo shardInfo = mock(ShardInfo.class); - doReturn(Optional.empty()).when(shardInfo).streamIdentifierSerOpt(); - factory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); - assertThat(factory, not(nullValue())); + public void testCreationWithApplicationName() { + getRecordsCache(null); - TestingConfig testingConfig = (TestingConfig) config; - assertThat(testingConfig.stream, equalTo(TEST_STREAM_NAME)); - assertThat(testingConfig.consumerToCreate, equalTo(TEST_APPLICATION_NAME)); + assertEquals(TEST_STREAM_NAME, config.streamName()); + assertEquals(TEST_APPLICATION_NAME, config.applicationName()); } @Test - public void testCreationWithConsumerName() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).consumerName(TEST_CONSUMER_NAME) - 
.streamName(TEST_STREAM_NAME); - RetrievalFactory factory = config.retrievalFactory(); - ShardInfo shardInfo = mock(ShardInfo.class); - doReturn(Optional.empty()).when(shardInfo).streamIdentifierSerOpt(); - factory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); - assertThat(factory, not(nullValue())); - TestingConfig testingConfig = (TestingConfig) config; - assertThat(testingConfig.stream, equalTo(TEST_STREAM_NAME)); - assertThat(testingConfig.consumerToCreate, equalTo(TEST_CONSUMER_NAME)); + public void testCreationWithConsumerName() { + config.consumerName(TEST_CONSUMER_NAME) + // unset common parameters + .applicationName(null); + + getRecordsCache(null); + + assertEquals(TEST_STREAM_NAME, config.streamName()); + assertEquals(TEST_CONSUMER_NAME, config.consumerName()); } @Test - public void testCreationWithBothConsumerApplication() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .consumerName(TEST_CONSUMER_NAME).streamName(TEST_STREAM_NAME); - RetrievalFactory factory = config.retrievalFactory(); - ShardInfo shardInfo = mock(ShardInfo.class); - doReturn(Optional.empty()).when(shardInfo).streamIdentifierSerOpt(); + public void testCreationWithBothConsumerApplication() { + config = config.consumerName(TEST_CONSUMER_NAME); + + getRecordsCache(null); + + assertEquals(TEST_STREAM_NAME, config.streamName()); + assertEquals(TEST_CONSUMER_NAME, config.consumerName()); + } + + @Test + public void testValidState() { + assertNull(config.consumerArn()); + assertNotNull(config.streamName()); + + config.validateState(false); + + // both streamName and consumerArn are non-null + config.consumerArn(TEST_CONSUMER_ARN); + config.validateState(false); + + config.consumerArn(null); + config.streamName(null); + config.validateState(false); + config.validateState(true); + + assertNull(config.streamName()); + assertNull(config.consumerArn()); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidStateMultiWithStreamName() { + testInvalidState(TEST_STREAM_NAME, null); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidStateMultiWithConsumerArn() { + testInvalidState(null, TEST_CONSUMER_ARN); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidStateMultiWithStreamNameAndConsumerArn() { + testInvalidState(TEST_STREAM_NAME, TEST_CONSUMER_ARN); + } + + private void testInvalidState(final String streamName, final String consumerArn) { + config.streamName(streamName); + config.consumerArn(consumerArn); + + try { + config.validateState(true); + } finally { + assertEquals(streamName, config.streamName()); + assertEquals(consumerArn, config.consumerArn()); + } + } + + private void getRecordsCache(final String streamIdentifier) { + final ShardInfo shardInfo = mock(ShardInfo.class); + when(shardInfo.streamIdentifierSerOpt()).thenReturn(Optional.ofNullable(streamIdentifier)); + + final RetrievalFactory factory = config.retrievalFactory(); factory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); - assertThat(factory, not(nullValue())); - - TestingConfig testingConfig = (TestingConfig) config; - assertThat(testingConfig.stream, equalTo(TEST_STREAM_NAME)); - assertThat(testingConfig.consumerToCreate, equalTo(TEST_CONSUMER_NAME)); } - - private class TestingConfig extends FanOutConfig { - - String stream; - String consumerToCreate; - - public TestingConfig(KinesisAsyncClient kinesisClient) { - super(kinesisClient); -
} - - @Override - protected FanOutConsumerRegistration createConsumerRegistration(KinesisAsyncClient client, String stream, - String consumerToCreate) { - this.stream = stream; - this.consumerToCreate = consumerToCreate; - return consumerRegistration; - } - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java index 245e22d5..e9d11b0a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java @@ -15,26 +15,14 @@ package software.amazon.kinesis.retrieval.fanout; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.CompletableFuture; import org.apache.commons.lang3.StringUtils; -import org.hamcrest.Matchers; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.Consumer; import software.amazon.awssdk.services.kinesis.model.ConsumerDescription; @@ -51,6 +39,15 @@ import software.amazon.awssdk.services.kinesis.model.StreamDescriptionSummary; import software.amazon.awssdk.services.kinesis.model.StreamStatus; import software.amazon.kinesis.leases.exceptions.DependencyException; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * */ @@ -72,19 +69,21 @@ public class FanOutConsumerRegistrationTest { @Before public void setup() { - consumerRegistration = new FanOutConsumerRegistration(client, STREAM_NAME, CONSUMER_NAME, MAX_DSS_RETRIES, - MAX_DSC_RETRIES, RSC_RETRIES, BACKOFF_MILLIS); + consumerRegistration = new FanOutConsumerRegistration( + client, STREAM_NAME, CONSUMER_NAME, MAX_DSS_RETRIES, MAX_DSC_RETRIES, RSC_RETRIES, BACKOFF_MILLIS); } @Test public void testConsumerAlreadyExists() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dscFuture = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - 
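The registration tests that follow exercise FanOutConsumerRegistration#getOrCreateStreamConsumerArn: describe the consumer, register it only when it does not exist, then poll until it is ACTIVE. A hedged, self-contained outline of that flow (Api, Status, and NotFound below are illustrative stand-ins, not the actual FanOutConsumerRegistration source):

    final class RegistrationFlowSketch {
        enum Status { CREATING, ACTIVE }

        static class NotFound extends RuntimeException {} // stand-in for ResourceNotFoundException

        interface Api {
            Status describeConsumer(); // stand-in for DescribeStreamConsumer
            void registerConsumer();   // stand-in for RegisterStreamConsumer
        }

        static Status getOrCreate(Api api, int maxPolls, long backoffMillis) throws InterruptedException {
            Status status;
            try {
                status = api.describeConsumer();
            } catch (NotFound e) {
                api.registerConsumer(); // only register when the consumer does not exist yet
                status = Status.CREATING;
            }
            for (int i = 0; i < maxPolls && status != Status.ACTIVE; i++) {
                Thread.sleep(backoffMillis); // BACKOFF_MILLIS in these tests
                status = api.describeConsumer();
            }
            if (status != Status.ACTIVE) {
                // mirrors the IllegalStateException expected by testStreamConsumerStuckInCreating
                throw new IllegalStateException("consumer still CREATING after " + maxPolls + " polls");
            }
            return status;
        }
    }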
when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); final String consumerArn = consumerRegistration.getOrCreateStreamConsumerArn(); @@ -95,13 +94,15 @@ public class FanOutConsumerRegistrationTest { @Test public void testConsumerAlreadyExistsMultipleCalls() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dscFuture = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); final String firstCall = consumerRegistration.getOrCreateStreamConsumerArn(); @@ -115,27 +116,28 @@ public class FanOutConsumerRegistrationTest { @Test(expected = LimitExceededException.class) public void testDescribeStreamConsumerThrottled() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); final CompletableFuture dscFuture = CompletableFuture.supplyAsync(() -> { throw LimitExceededException.builder().build(); }); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); try { consumerRegistration.getOrCreateStreamConsumerArn(); } finally { - verify(client, times(MAX_DSC_RETRIES)) - .describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); + verify(client, times(MAX_DSC_RETRIES)).describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); } } @Test(expected = DependencyException.class) public void testRegisterStreamConsumerThrottled() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); final CompletableFuture dscFuture = CompletableFuture.supplyAsync(() -> { throw ResourceNotFoundException.builder().build(); }); @@ -143,36 +145,42 @@ public class FanOutConsumerRegistrationTest { throw LimitExceededException.builder().build(); }); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - 
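The supplyAsync-and-throw idiom above is how these tests hand the mocked async client a future that fails: a Supplier that throws completes the future exceptionally, and join() resurfaces the original exception wrapped in a CompletionException. A small standalone illustration (a plain RuntimeException standing in for LimitExceededException):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;

    final class FailedFutureSketch {
        public static void main(String[] args) {
            CompletableFuture<String> failed = CompletableFuture.supplyAsync(() -> {
                throw new RuntimeException("LimitExceededException stand-in");
            });
            try {
                failed.join();
            } catch (CompletionException e) {
                // Retry logic sees the original exception as the cause.
                System.out.println("cause: " + e.getCause().getMessage());
            }
        }
    }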
when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))).thenReturn(rscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); + when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))) + .thenReturn(rscFuture); try { consumerRegistration.getOrCreateStreamConsumerArn(); } finally { - verify(client, times(RSC_RETRIES)) - .registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); + verify(client, times(RSC_RETRIES)).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); } } @Test public void testNewRegisterStreamConsumer() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); final CompletableFuture failureResponse = CompletableFuture.supplyAsync(() -> { throw ResourceNotFoundException.builder().build(); }); - final CompletableFuture intermidateResponse = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); - final CompletableFuture successResponse = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - final CompletableFuture rscFuture = CompletableFuture - .completedFuture(createRegisterStreamConsumerResponse()); + final CompletableFuture intermediateResponse = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); + final CompletableFuture successResponse = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); + final CompletableFuture rscFuture = + CompletableFuture.completedFuture(createRegisterStreamConsumerResponse()); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(failureResponse) - .thenReturn(intermidateResponse).thenReturn(successResponse); - when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))).thenReturn(rscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(failureResponse) + .thenReturn(intermediateResponse) + .thenReturn(successResponse); + when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))) + .thenReturn(rscFuture); final long startTime = System.currentTimeMillis(); final String consumerArn = consumerRegistration.getOrCreateStreamConsumerArn(); @@ -186,23 +194,23 @@ @Test(expected = IllegalStateException.class) public void testStreamConsumerStuckInCreating() throws Exception { - final CompletableFuture dssFuture = CompletableFuture.completedFuture( - createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dscFuture =
CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); try { consumerRegistration.getOrCreateStreamConsumerArn(); } finally { // Verify that the call to DSC was made for the max retry attempts and one for the initial response object. - verify(client, times(MAX_DSC_RETRIES + 1)) - .describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); + verify(client, times(MAX_DSC_RETRIES + 1)).describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); verify(client, never()).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); } - } private DescribeStreamSummaryRequest createDescribeStreamSummaryRequest() { @@ -210,29 +218,49 @@ public class FanOutConsumerRegistrationTest { } private DescribeStreamSummaryResponse createDescribeStreamSummaryResponse() { - return DescribeStreamSummaryResponse.builder().streamDescriptionSummary(StreamDescriptionSummary.builder() - .streamName(STREAM_NAME).streamARN(STREAM_ARN).streamStatus(StreamStatus.ACTIVE).build()).build(); + return DescribeStreamSummaryResponse.builder() + .streamDescriptionSummary(StreamDescriptionSummary.builder() + .streamName(STREAM_NAME) + .streamARN(STREAM_ARN) + .streamStatus(StreamStatus.ACTIVE) + .build()) + .build(); } private DescribeStreamConsumerRequest createDescribeStreamConsumerRequest(final String consumerArn) { if (StringUtils.isEmpty(consumerArn)) { - return DescribeStreamConsumerRequest.builder().streamARN(STREAM_ARN).consumerName(CONSUMER_NAME).build(); + return DescribeStreamConsumerRequest.builder() + .streamARN(STREAM_ARN) + .consumerName(CONSUMER_NAME) + .build(); } return DescribeStreamConsumerRequest.builder().consumerARN(consumerArn).build(); } private DescribeStreamConsumerResponse createDescribeStreamConsumerResponse(final ConsumerStatus status) { - return DescribeStreamConsumerResponse.builder().consumerDescription(ConsumerDescription.builder() - .consumerStatus(status).consumerARN(CONSUMER_ARN).consumerName(CONSUMER_NAME).build()).build(); + return DescribeStreamConsumerResponse.builder() + .consumerDescription(ConsumerDescription.builder() + .consumerStatus(status) + .consumerARN(CONSUMER_ARN) + .consumerName(CONSUMER_NAME) + .build()) + .build(); } private RegisterStreamConsumerRequest createRegisterStreamConsumerRequest() { - return RegisterStreamConsumerRequest.builder().streamARN(STREAM_ARN).consumerName(CONSUMER_NAME).build(); + return RegisterStreamConsumerRequest.builder() + .streamARN(STREAM_ARN) + .consumerName(CONSUMER_NAME) + .build(); } private RegisterStreamConsumerResponse createRegisterStreamConsumerResponse() { - return RegisterStreamConsumerResponse.builder().consumer(Consumer.builder().consumerName(CONSUMER_NAME) - .consumerARN(CONSUMER_ARN).consumerStatus(ConsumerStatus.CREATING).build()).build(); + return RegisterStreamConsumerResponse.builder() + .consumer(Consumer.builder() + .consumerName(CONSUMER_NAME) + .consumerARN(CONSUMER_ARN) + .consumerStatus(ConsumerStatus.CREATING) + .build()) + .build(); } - } diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java index 40d86c49..cf135159 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java @@ -1,5 +1,27 @@ package software.amazon.kinesis.retrieval.fanout; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletionException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.handler.timeout.ReadTimeoutException; import io.reactivex.rxjava3.core.Flowable; @@ -8,7 +30,6 @@ import io.reactivex.rxjava3.schedulers.Schedulers; import io.reactivex.rxjava3.subscribers.SafeSubscriber; import lombok.Data; import lombok.RequiredArgsConstructor; -import lombok.Setter; import lombok.extern.slf4j.Slf4j; import org.hamcrest.Description; import org.hamcrest.Matcher; @@ -46,29 +67,6 @@ import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; import software.amazon.kinesis.utils.SubscribeToShardRequestMatcher; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; @@ -77,7 +75,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.never; @@ -96,68 +93,78 @@ public class FanOutRecordsPublisherTest { @Mock private KinesisAsyncClient kinesisClient; + @Mock private SdkPublisher publisher; + @Mock private 
Subscription subscription; + @Mock private Subscriber subscriber; private SubscribeToShardEvent batchEvent; @Test - public void simpleTest() throws Exception { + public void testSimple() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber() { - Subscription subscription; + source.subscribe(new ShardConsumerNotifyingSubscriber( + new Subscriber() { + Subscription subscription; - @Override public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } + @Override + public void onSubscribe(Subscription s) { + subscription = s; + subscription.request(1); + } - @Override public void onNext(RecordsRetrieved input) { - receivedInput.add(input.processRecordsInput()); - subscription.request(1); - } + @Override + public void onNext(RecordsRetrieved input) { + receivedInput.add(input.processRecordsInput()); + subscription.request(1); + } - @Override public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } + @Override + public void onError(Throwable t) { + log.error("Caught throwable in subscriber", t); + fail("Caught throwable in subscriber"); + } - @Override public void onComplete() { - fail("OnComplete called when not expected"); - } - }, source)); + @Override + public void onComplete() { + fail("OnComplete called when not expected"); + } + }, + source)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); batchEvent = SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .records(records) - .continuationSequenceNumber("test") - .childShards(Collections.emptyList()) - .build(); + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber("test") + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onNext(batchEvent); @@ -172,58 +179,72 @@ public class FanOutRecordsPublisherTest { assertThat(clientRecordsList.get(i), matchers.get(i)); } }); - } @Test - public void InvalidEventTest() throws Exception { + public void testInvalidEvent() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - 
.forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); + source.start( + ExtendedSequenceNumber.LATEST, + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber() { - Subscription subscription; + source.subscribe(new ShardConsumerNotifyingSubscriber( + new Subscriber() { + Subscription subscription; - @Override public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } + @Override + public void onSubscribe(Subscription s) { + subscription = s; + subscription.request(1); + } - @Override public void onNext(RecordsRetrieved input) { - receivedInput.add(input.processRecordsInput()); - subscription.request(1); - } + @Override + public void onNext(RecordsRetrieved input) { + receivedInput.add(input.processRecordsInput()); + subscription.request(1); + } - @Override public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } + @Override + public void onError(Throwable t) { + log.error("Caught throwable in subscriber", t); + fail("Caught throwable in subscriber"); + } - @Override public void onComplete() { - fail("OnComplete called when not expected"); - } - }, source)); + @Override + public void onComplete() { + fail("OnComplete called when not expected"); + } + }, + source)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER).build(); - SubscribeToShardEvent invalidEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).childShards(Collections.emptyList()).build(); + batchEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER) + .build(); + SubscribeToShardEvent invalidEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(records) + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onNext(invalidEvent); @@ -239,21 +260,21 @@ public class FanOutRecordsPublisherTest { assertThat(clientRecordsList.get(i), matchers.get(i)); } }); - } @Test - public void testIfAllEventsReceivedWhenNoTasksRejectedByExecutor() throws Exception { + public void testIfAllEventsReceivedWhenNoTasksRejectedByExecutor() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor 
flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -262,30 +283,37 @@ public class FanOutRecordsPublisherTest { new Subscriber() { Subscription subscription; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); subscription.request(1); } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); Scheduler testScheduler = getScheduler(getBlockingExecutor(getSpiedExecutor(getTestExecutor()))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -293,17 +321,16 @@ public class FanOutRecordsPublisherTest { captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); Stream.of("1000", "2000", "3000") - .map(contSeqNum -> - SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .continuationSequenceNumber(contSeqNum) - .records(records) - .childShards(Collections.emptyList()) - .build()) + .map(contSeqNum -> SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .continuationSequenceNumber(contSeqNum) + .records(records) + .childShards(Collections.emptyList()) + .build()) .forEach(batchEvent -> captor.getValue().onNext(batchEvent)); verify(subscription, times(4)).request(1); @@ -317,21 +344,21 @@ public class FanOutRecordsPublisherTest { }); assertThat(source.getCurrentSequenceNumber(), equalTo("3000")); - } @Test - public void testIfEventsAreNotDeliveredToShardConsumerWhenPreviousEventDeliveryTaskGetsRejected() throws Exception { + public void testIfEventsAreNotDeliveredToShardConsumerWhenPreviousEventDeliveryTaskGetsRejected() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = 
+ ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -340,30 +367,37 @@ public class FanOutRecordsPublisherTest { new Subscriber() { Subscription subscription; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); subscription.request(1); } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); Scheduler testScheduler = getScheduler(getOverwhelmedBlockingExecutor(getSpiedExecutor(getTestExecutor()))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(new SafeSubscriber<>(shardConsumerSubscriber)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -371,17 +405,16 @@ public class FanOutRecordsPublisherTest { captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); Stream.of("1000", "2000", "3000") - .map(contSeqNum -> - SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .continuationSequenceNumber(contSeqNum) - .records(records) - .childShards(Collections.emptyList()) - .build()) + .map(contSeqNum -> SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .continuationSequenceNumber(contSeqNum) + .records(records) + .childShards(Collections.emptyList()) + .build()) .forEach(batchEvent -> captor.getValue().onNext(batchEvent)); verify(subscription, times(2)).request(1); @@ -395,22 +428,21 @@ public class FanOutRecordsPublisherTest { }); assertThat(source.getCurrentSequenceNumber(), equalTo("1000")); - } @Test public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer 
servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -420,12 +452,16 @@ public class FanOutRecordsPublisherTest { CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2); int totalServicePublisherEvents = 1000; int initialDemand = 0; - BackpressureAdheringServicePublisher servicePublisher = - new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, initialDemand); + BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, + initialDemand); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -435,45 +471,54 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber()); + assertEquals( + "" + ++lastSeenSeqNum, + ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); - if(receivedInput.size() == totalServicePublisherEvents) { + if (receivedInput.size() == totalServicePublisherEvents) { servicePublisherTaskCompletionLatch.countDown(); } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -488,32 +533,34 @@ public class FanOutRecordsPublisherTest { }); assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + "")); - } @Test - public void 
testIfStreamOfEventsAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { - + public void testIfStreamOfEventsAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() + throws Exception { CountDownLatch onS2SCallLatch = new CountDownLatch(2); doAnswer(new Answer() { - @Override public Object answer(InvocationOnMock invocation) throws Throwable { - onS2SCallLatch.countDown(); - return null; - } - }).when(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), any()); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + onS2SCallLatch.countDown(); + return null; + } + }) + .when(kinesisClient) + .subscribeToShard(any(SubscribeToShardRequest.class), any()); FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -525,13 +572,17 @@ public class FanOutRecordsPublisherTest { int initialDemand = 9; int triggerCompleteAtNthEvent = 200; BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( - servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, initialDemand); - servicePublisher.setCompleteTrigger(triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete()); + servicePublisher.setCompleteTrigger( + triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete()); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -541,37 +592,46 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber()); + assertEquals( + "" + ++lastSeenSeqNum, + ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); - if(receivedInput.size() == triggerCompleteAtNthEvent) { + if (receivedInput.size() == triggerCompleteAtNthEvent) { servicePublisherTaskCompletionLatch.countDown(); } } - @Override public 
void onError(Throwable t) {
+                    @Override
+                    public void onError(Throwable t) {
                        log.error("Caught throwable in subscriber", t);
                        fail("Caught throwable in subscriber");
                    }

-                    @Override public void onComplete() {
+                    @Override
+                    public void onComplete() {
                        fail("OnComplete called when not expected");
                    }
-                }, source);
+                },
+                source);

        ExecutorService executorService = getTestExecutor();
        Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
        int bufferSize = 8;

-        Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
+        Flowable.fromPublisher(source)
+                .subscribeOn(testScheduler)
+                .observeOn(testScheduler, true, bufferSize)
                .subscribe(shardConsumerSubscriber);

        verify(kinesisClient, times(1)).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
@@ -579,8 +639,8 @@ public class FanOutRecordsPublisherTest {
        flowCaptor.getValue().onEventStream(publisher);
        captor.getValue().onSubscribe(subscription);

-        List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
-                .collect(Collectors.toList());
+        List<KinesisClientRecordMatcher> matchers =
+                records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList());

        executorService.submit(servicePublisher);
        servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);
@@ -599,22 +659,22 @@ public class FanOutRecordsPublisherTest {
        // Let's wait for some time to allow the publisher to re-subscribe
        onS2SCallLatch.await(5000, TimeUnit.MILLISECONDS);
        verify(kinesisClient, times(2)).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
-
    }

    @Test
-    public void testIfShardEndEventAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception {
+    public void testIfShardEndEventAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher()
+            throws Exception {
        FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

-        ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
-                .forClass(FanOutRecordsPublisher.RecordSubscription.class);
-        ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
-                .forClass(FanOutRecordsPublisher.RecordFlow.class);
+        ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor =
+                ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class);
+        ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor =
+                ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class);

        List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());

-        Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
-                SubscribeToShardEvent.builder()
+        Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue()
+                .onNext(SubscribeToShardEvent.builder()
                        .millisBehindLatest(100L)
                        .continuationSequenceNumber(contSeqNum + "")
                        .records(records)
@@ -625,19 +685,19 @@ public class FanOutRecordsPublisherTest {
        List<String> parentShards = new ArrayList<>();
        parentShards.add(SHARD_ID);
        ChildShard leftChild = ChildShard.builder()
-                .shardId("Shard-002")
-                .parentShards(parentShards)
-                .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49"))
-                .build();
+                .shardId("Shard-002")
+                .parentShards(parentShards)
+                .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49"))
+                .build();
        ChildShard rightChild = ChildShard.builder()
-                .shardId("Shard-003")
-                .parentShards(parentShards)
-                .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99"))
-                .build();
+                .shardId("Shard-003")
+                .parentShards(parentShards)
+                .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99"))
+                .build();
        childShards.add(leftChild);
childShards.add(rightChild); - Consumer servicePublisherShardEndAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherShardEndAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(null) .records(records) @@ -650,58 +710,67 @@ public class FanOutRecordsPublisherTest { int initialDemand = 9; int triggerCompleteAtNthEvent = 200; BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( - servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, initialDemand); - servicePublisher - .setShardEndAndCompleteTrigger(triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete(), - servicePublisherShardEndAction); + servicePublisher.setShardEndAndCompleteTrigger( + triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete(), servicePublisherShardEndAction); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - final boolean[] isOnCompleteTriggered = { false }; + final boolean[] isOnCompleteTriggered = {false}; Subscriber shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber( new Subscriber() { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); subscription.request(1); servicePublisher.request(1); - if(receivedInput.size() == triggerCompleteAtNthEvent) { + if (receivedInput.size() == triggerCompleteAtNthEvent) { servicePublisherTaskCompletionLatch.countDown(); } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { isOnCompleteTriggered[0] = true; onCompleteLatch.countDown(); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient, times(1)).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -709,8 +778,8 @@ public class FanOutRecordsPublisherTest { flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, 
TimeUnit.MILLISECONDS); @@ -728,22 +797,22 @@ public class FanOutRecordsPublisherTest { // With shard end event, onComplete must be propagated to the subscriber. onCompleteLatch.await(5000, TimeUnit.MILLISECONDS); assertTrue("OnComplete should be triggered", isOnCompleteTriggered[0]); - } @Test - public void testIfStreamOfEventsAndOnErrorAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { + public void testIfStreamOfEventsAndOnErrorAreDeliveredInOrderWithBackpressureAdheringServicePublisher() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -756,17 +825,21 @@ public class FanOutRecordsPublisherTest { int initialDemand = 9; int triggerErrorAtNthEvent = 241; BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( - servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, initialDemand); - servicePublisher.setErrorTrigger(triggerErrorAtNthEvent, + servicePublisher.setErrorTrigger( + triggerErrorAtNthEvent, () -> flowCaptor.getValue().exceptionOccurred(new RuntimeException("Service Exception"))); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); - final boolean[] isOnErrorThrown = { false }; + final boolean[] isOnErrorThrown = {false}; List receivedInput = new ArrayList<>(); @@ -775,46 +848,55 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber()); + assertEquals( + "" + ++lastSeenSeqNum, + ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); - if(receivedInput.size() == triggerErrorAtNthEvent) { + if (receivedInput.size() == triggerErrorAtNthEvent) { servicePublisherTaskCompletionLatch.countDown(); } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); 
isOnErrorThrown[0] = true; onErrorReceiveLatch.countDown(); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -831,22 +913,23 @@ public class FanOutRecordsPublisherTest { assertThat(source.getCurrentSequenceNumber(), equalTo(triggerErrorAtNthEvent + "")); onErrorReceiveLatch.await(5000, TimeUnit.MILLISECONDS); assertTrue("OnError should have been thrown", isOnErrorThrown[0]); - } @Test - public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstWithinLimit() throws Exception { + public void + testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstWithinLimit() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -856,12 +939,16 @@ public class FanOutRecordsPublisherTest { CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2); int totalServicePublisherEvents = 1000; int initialDemand = 9; - BackpressureAdheringServicePublisher servicePublisher = - new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, initialDemand); + BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, + initialDemand); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -871,45 +958,54 @@ 
public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber()); + assertEquals( + "" + ++lastSeenSeqNum, + ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); - if(receivedInput.size() == totalServicePublisherEvents) { + if (receivedInput.size() == totalServicePublisherEvents) { servicePublisherTaskCompletionLatch.countDown(); } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -924,22 +1020,23 @@ public class FanOutRecordsPublisherTest { }); assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + "")); - } @Test - public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstOverLimit() throws Exception { + public void + testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstOverLimit() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + 
"") .records(records) @@ -949,12 +1046,16 @@ public class FanOutRecordsPublisherTest { CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(1); int totalServicePublisherEvents = 1000; int initialDemand = 11; - BackpressureAdheringServicePublisher servicePublisher = - new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, initialDemand); + BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, + initialDemand); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -965,43 +1066,52 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved)input).continuationSequenceNumber()); + assertEquals( + "" + ++lastSeenSeqNum, + ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); onErrorSet.set(true); servicePublisherTaskCompletionLatch.countDown(); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -1021,8 +1131,12 @@ public class FanOutRecordsPublisherTest { } private ExecutorService getTestExecutor() { - return Executors.newFixedThreadPool(8, - new ThreadFactoryBuilder().setNameFormat("test-fanout-record-publisher-%04d").setDaemon(true).build()); + return Executors.newFixedThreadPool( + 8, + new ThreadFactoryBuilder() + .setNameFormat("test-fanout-record-publisher-%04d") + .setDaemon(true) + .build()); } private ExecutorService getSpiedExecutor(ExecutorService executorService) { @@ -1030,25 +1144,29 @@ 
public class FanOutRecordsPublisherTest { } private ExecutorService getBlockingExecutor(ExecutorService executorService) { - doAnswer(invocation -> directlyExecuteRunnable(invocation)).when(executorService).execute(any()); + doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .when(executorService) + .execute(any()); return executorService; } private ExecutorService getInitiallyBlockingExecutor(ExecutorService executorService) { doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doCallRealMethod() - .when(executorService).execute(any()); + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .doCallRealMethod() + .when(executorService) + .execute(any()); return executorService; } private ExecutorService getOverwhelmedBlockingExecutor(ExecutorService executorService) { doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doThrow(new RejectedExecutionException()) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .when(executorService).execute(any()); + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .doThrow(new RejectedExecutionException()) + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .when(executorService) + .execute(any()); return executorService; } @@ -1063,55 +1181,62 @@ public class FanOutRecordsPublisherTest { public void largeRequestTest() throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber() { - Subscription subscription; + source.subscribe(new ShardConsumerNotifyingSubscriber( + new Subscriber() { + Subscription subscription; - @Override public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(3); - } + @Override + public void onSubscribe(Subscription s) { + subscription = s; + subscription.request(3); + } - @Override public void onNext(RecordsRetrieved input) { - receivedInput.add(input.processRecordsInput()); - subscription.request(1); - } + @Override + public void onNext(RecordsRetrieved input) { + receivedInput.add(input.processRecordsInput()); + subscription.request(1); + } - @Override public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } + @Override + public void onError(Throwable t) { + log.error("Caught throwable in subscriber", t); + fail("Caught throwable in subscriber"); + } - @Override public void onComplete() { - fail("OnComplete called when not expected"); - } - }, source)); + @Override + public void onComplete() { + 
fail("OnComplete called when not expected"); + } + }, + source)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); batchEvent = SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .records(records) - .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER) - .childShards(Collections.emptyList()) - .build(); + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER) + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onNext(batchEvent); @@ -1126,22 +1251,22 @@ public class FanOutRecordsPublisherTest { assertThat(clientRecordsList.get(i), matchers.get(i)); } }); - } @Test public void testResourceNotFoundForShard() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); ArgumentCaptor inputCaptor = ArgumentCaptor.forClass(RecordsRetrieved.class); source.subscribe(subscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); FanOutRecordsPublisher.RecordFlow recordFlow = flowCaptor.getValue(); - recordFlow.exceptionOccurred(new RuntimeException(ResourceNotFoundException.builder().build())); + recordFlow.exceptionOccurred( + new RuntimeException(ResourceNotFoundException.builder().build())); verify(subscriber).onSubscribe(any()); verify(subscriber, never()).onError(any()); @@ -1157,8 +1282,8 @@ public class FanOutRecordsPublisherTest { public void testReadTimeoutExceptionForShard() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); source.subscribe(subscriber); @@ -1176,52 +1301,67 @@ public class FanOutRecordsPublisherTest { public void testContinuesAfterSequence() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(new ExtendedSequenceNumber("0"), + source.start( + new ExtendedSequenceNumber("0"), InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); NonFailingSubscriber nonFailingSubscriber = new NonFailingSubscriber(); source.subscribe(new ShardConsumerNotifyingSubscriber(nonFailingSubscriber, source)); - SubscribeToShardRequest expected = 
SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN).shardId(SHARD_ID) - .startingPosition(StartingPosition.builder().sequenceNumber("0") - .type(ShardIteratorType.AT_SEQUENCE_NUMBER).build()) + SubscribeToShardRequest expected = SubscribeToShardRequest.builder() + .consumerARN(CONSUMER_ARN) + .shardId(SHARD_ID) + .startingPosition(StartingPosition.builder() + .sequenceNumber("0") + .type(ShardIteratorType.AT_SEQUENCE_NUMBER) + .build()) .build(); - verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(expected)), flowCaptor.capture()); + verify(kinesisClient) + .subscribeToShard(argThat(new SubscribeToShardRequestMatcher(expected)), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records) - .continuationSequenceNumber("3").childShards(Collections.emptyList()).build(); + batchEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber("3") + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onComplete(); flowCaptor.getValue().complete(); - ArgumentCaptor nextSubscribeCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor nextFlowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor nextSubscribeCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor nextFlowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); - SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN) - .shardId(SHARD_ID).startingPosition(StartingPosition.builder().sequenceNumber("3") - .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER).build()) + SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder() + .consumerARN(CONSUMER_ARN) + .shardId(SHARD_ID) + .startingPosition(StartingPosition.builder() + .sequenceNumber("3") + .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER) + .build()) .build(); - verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(nextExpected)), nextFlowCaptor.capture()); + verify(kinesisClient) + .subscribeToShard(argThat(new SubscribeToShardRequestMatcher(nextExpected)), nextFlowCaptor.capture()); reset(publisher); doNothing().when(publisher).subscribe(nextSubscribeCaptor.capture()); @@ -1229,11 +1369,15 @@ public class FanOutRecordsPublisherTest { nextSubscribeCaptor.getValue().onSubscribe(subscription); List nextRecords = Stream.of(4, 5, 6).map(this::makeRecord).collect(Collectors.toList()); - List nextMatchers = nextRecords.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List nextMatchers = + nextRecords.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(nextRecords) - .continuationSequenceNumber("6").childShards(Collections.emptyList()).build(); + batchEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(nextRecords) + 
.continuationSequenceNumber("6") + .childShards(Collections.emptyList()) + .build(); nextSubscribeCaptor.getValue().onNext(batchEvent); verify(subscription, times(4)).request(1); @@ -1242,46 +1386,64 @@ public class FanOutRecordsPublisherTest { verifyRecords(nonFailingSubscriber.received.get(0).records(), matchers); verifyRecords(nonFailingSubscriber.received.get(1).records(), nextMatchers); - } @Test public void testIfBufferingRecordsWithinCapacityPublishesOneEvent() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); RecordsRetrieved recordsRetrieved = ProcessRecordsInput.builder()::build; FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 10).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, recordFlow)); + IntStream.rangeClosed(1, 10) + .forEach(i -> + fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, recordFlow)); assertEquals(1, totalRecordsRetrieved[0]); } @Test public void testIfBufferingRecordsOverCapacityPublishesOneEventAndThrows() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); RecordsRetrieved recordsRetrieved = ProcessRecordsInput.builder()::build; FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); try { - IntStream.rangeClosed(1, 12).forEach( - i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, recordFlow)); + IntStream.rangeClosed(1, 12) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + recordsRetrieved, recordFlow)); fail("Should throw Queue full exception"); } catch (IllegalStateException e) { assertEquals("Queue full", e.getMessage()); @@ -1291,87 +1453,116 @@ public class 
FanOutRecordsPublisherTest { @Test public void testIfPublisherAlwaysPublishesWhenQueueIsEmpty() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; // This makes sure the queue is immediately made empty, so that the next event enqueued will // be the only element in the queue. - fanOutRecordsPublisher - .evictAckedEventAndScheduleNextEvent(() -> recordsRetrieved.batchUniqueIdentifier()); + fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent( + () -> recordsRetrieved.batchUniqueIdentifier()); } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 137).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( - new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), - recordFlow)); + IntStream.rangeClosed(1, 137) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + new FanOutRecordsPublisher.FanoutRecordsRetrieved( + ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), + recordFlow)); assertEquals(137, totalRecordsRetrieved[0]); } @Test public void testIfPublisherIgnoresStaleEventsAndContinuesWithNextFlow() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; // This makes sure the queue is immediately made empty, so that the next event enqueued will // be the only element in the queue. 
-                fanOutRecordsPublisher
-                        .evictAckedEventAndScheduleNextEvent(() -> recordsRetrieved.batchUniqueIdentifier());
+                fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(
+                        () -> recordsRetrieved.batchUniqueIdentifier());
                // Send stale event periodically
-                if(totalRecordsRetrieved[0] % 10 == 0) {
+                if (totalRecordsRetrieved[0] % 10 == 0) {
                    fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(
                            () -> new BatchUniqueIdentifier("some_uuid_str", "some_old_flow"));
                }
            }
-            @Override public void onError(Throwable throwable) {}
-            @Override public void onComplete() {}
+
+            @Override
+            public void onError(Throwable throwable) {}
+
+            @Override
+            public void onComplete() {}
        });

-        IntStream.rangeClosed(1, 100).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(
-                new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()),
-                recordFlow));
+        IntStream.rangeClosed(1, 100)
+                .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(
+                        new FanOutRecordsPublisher.FanoutRecordsRetrieved(
+                                ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()),
+                        recordFlow));

        assertEquals(100, totalRecordsRetrieved[0]);
    }

    @Test
    public void testIfPublisherIgnoresStaleEventsAndContinuesWithNextFlowWhenDeliveryQueueIsNotEmpty()
            throws InterruptedException {
-        FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
+        FanOutRecordsPublisher fanOutRecordsPublisher =
+                new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
        FanOutRecordsPublisher.RecordFlow recordFlow =
                new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001");
-        final int[] totalRecordsRetrieved = { 0 };
+        final int[] totalRecordsRetrieved = {0};
        BlockingQueue<BatchUniqueIdentifier> ackQueue = new LinkedBlockingQueue<>();
        fanOutRecordsPublisher.subscribe(new Subscriber<RecordsRetrieved>() {
-            @Override public void onSubscribe(Subscription subscription) {}
-            @Override public void onNext(RecordsRetrieved recordsRetrieved) {
+            @Override
+            public void onSubscribe(Subscription subscription) {}
+
+            @Override
+            public void onNext(RecordsRetrieved recordsRetrieved) {
                totalRecordsRetrieved[0]++;
                // Enqueue the ack for bursty delivery
                ackQueue.add(recordsRetrieved.batchUniqueIdentifier());
                // Send stale event periodically
            }
-            @Override public void onError(Throwable throwable) {}
-            @Override public void onComplete() {}
+
+            @Override
+            public void onError(Throwable throwable) {}
+
+            @Override
+            public void onComplete() {}
        });

-        IntStream.rangeClosed(1, 10).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(
-                new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()),
-                recordFlow));
+        IntStream.rangeClosed(1, 10)
+                .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(
+                        new FanOutRecordsPublisher.FanoutRecordsRetrieved(
+                                ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()),
+                        recordFlow));

        BatchUniqueIdentifier batchUniqueIdentifierQueued;
        int count = 0;
        // Now that we allowed up to 10 elements to queue up, send a pair of good and stale acks to verify records
        // are delivered as expected.
-        while(count++ < 10 && (batchUniqueIdentifierQueued = ackQueue.take()) != null) {
+        while (count++ < 10 && (batchUniqueIdentifierQueued = ackQueue.take()) != null) {
            final BatchUniqueIdentifier batchUniqueIdentifierFinal = batchUniqueIdentifierQueued;
-            fanOutRecordsPublisher
-                    .evictAckedEventAndScheduleNextEvent(() -> batchUniqueIdentifierFinal);
+            fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(() -> batchUniqueIdentifierFinal);
            fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(
                    () -> new BatchUniqueIdentifier("some_uuid_str", "some_old_flow"));
        }
@@ -1380,30 +1571,40 @@ public class FanOutRecordsPublisherTest {

    @Test(expected = IllegalStateException.class)
    public void testIfPublisherThrowsWhenMismatchAckforActiveFlowSeen() throws InterruptedException {
-        FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
+        FanOutRecordsPublisher fanOutRecordsPublisher =
+                new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
        FanOutRecordsPublisher.RecordFlow recordFlow =
                new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "Shard-001-1");
-        final int[] totalRecordsRetrieved = { 0 };
+        final int[] totalRecordsRetrieved = {0};
        BlockingQueue<BatchUniqueIdentifier> ackQueue = new LinkedBlockingQueue<>();
        fanOutRecordsPublisher.subscribe(new Subscriber<RecordsRetrieved>() {
-            @Override public void onSubscribe(Subscription subscription) {}
-            @Override public void onNext(RecordsRetrieved recordsRetrieved) {
+            @Override
+            public void onSubscribe(Subscription subscription) {}
+
+            @Override
+            public void onNext(RecordsRetrieved recordsRetrieved) {
                totalRecordsRetrieved[0]++;
                // Enqueue the ack for bursty delivery
                ackQueue.add(recordsRetrieved.batchUniqueIdentifier());
                // Send stale event periodically
            }
-            @Override public void onError(Throwable throwable) {}
-            @Override public void onComplete() {}
+
+            @Override
+            public void onError(Throwable throwable) {}
+
+            @Override
+            public void onComplete() {}
        });

-        IntStream.rangeClosed(1, 10).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(
-                new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()),
-                recordFlow));
+        IntStream.rangeClosed(1, 10)
+                .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(
+                        new FanOutRecordsPublisher.FanoutRecordsRetrieved(
+                                ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()),
+                        recordFlow));

        BatchUniqueIdentifier batchUniqueIdentifierQueued;
        int count = 0;
        // Now that we allowed up to 10 elements to queue up, send a pair of good and stale acks to verify records
        // are delivered as expected.
- while(count++ < 2 && (batchUniqueIdentifierQueued = ackQueue.poll(1000, TimeUnit.MILLISECONDS)) != null) { + while (count++ < 2 && (batchUniqueIdentifierQueued = ackQueue.poll(1000, TimeUnit.MILLISECONDS)) != null) { final BatchUniqueIdentifier batchUniqueIdentifierFinal = batchUniqueIdentifierQueued; fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent( () -> new BatchUniqueIdentifier("some_uuid_str", batchUniqueIdentifierFinal.getFlowIdentifier())); @@ -1421,14 +1622,15 @@ public class FanOutRecordsPublisherTest { } }; - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); RecordingSubscriber subscriber = new RecordingSubscriber(); source.subscribe(subscriber); @@ -1437,31 +1639,35 @@ public class FanOutRecordsPublisherTest { Throwable exception = new CompletionException( "software.amazon.awssdk.core.exception.SdkClientException", - SdkClientException.create(null, new Throwable( - "Acquire operation took longer than the configured maximum time. This indicates that a " + - "request cannot get a connection from the pool within the specified maximum time. " + - "This can be due to high request rate.\n" + - "Consider taking any of the following actions to mitigate the issue: increase max " + - "connections, increase acquire timeout, or slowing the request rate.\n" + - "Increasing the max connections can increase client throughput (unless the network " + - "interface is already fully utilized), but can eventually start to hit operation " + - "system limitations on the number of file descriptors used by the process. " + - "If you already are fully utilizing your network interface or cannot further " + - "increase your connection count, increasing the acquire timeout gives extra time " + - "for requests to acquire a connection before timing out. " + - "If the connections doesn't free up, the subsequent requests will still timeout.\n" + - "If the above mechanisms are not able to fix the issue, try smoothing out your " + - "requests so that large traffic bursts cannot overload the client, being more " + - "efficient with the number of times you need to call AWS, or by increasing the " + - "number of hosts sending requests."))); + SdkClientException.create( + null, + new Throwable( + "Acquire operation took longer than the configured maximum time. This indicates that a " + + "request cannot get a connection from the pool within the specified maximum time. " + + "This can be due to high request rate.\n" + + "Consider taking any of the following actions to mitigate the issue: increase max " + + "connections, increase acquire timeout, or slowing the request rate.\n" + + "Increasing the max connections can increase client throughput (unless the network " + + "interface is already fully utilized), but can eventually start to hit operation " + + "system limitations on the number of file descriptors used by the process. 
" + + "If you already are fully utilizing your network interface or cannot further " + + "increase your connection count, increasing the acquire timeout gives extra time " + + "for requests to acquire a connection before timing out. " + + "If the connections doesn't free up, the subsequent requests will still timeout.\n" + + "If the above mechanisms are not able to fix the issue, try smoothing out your " + + "requests so that large traffic bursts cannot overload the client, being more " + + "efficient with the number of times you need to call AWS, or by increasing the " + + "number of hosts sending requests."))); flowCaptor.getValue().exceptionOccurred(exception); - Optional onErrorEvent = subscriber.events.stream().filter(e -> e instanceof OnErrorEvent).map(e -> (OnErrorEvent)e).findFirst(); + Optional onErrorEvent = subscriber.events.stream() + .filter(e -> e instanceof OnErrorEvent) + .map(e -> (OnErrorEvent) e) + .findFirst(); assertThat(onErrorEvent, equalTo(Optional.of(new OnErrorEvent(exception)))); assertThat(acquireTimeoutLogged.get(), equalTo(true)); - } private void verifyRecords(List clientRecordsList, List matchers) { @@ -1471,9 +1677,7 @@ public class FanOutRecordsPublisherTest { } } - private interface SubscriberEvent { - - } + private interface SubscriberEvent {} @Data private static class SubscribeEvent implements SubscriberEvent { @@ -1491,9 +1695,7 @@ public class FanOutRecordsPublisherTest { } @Data - private static class OnCompleteEvent implements SubscriberEvent { - - } + private static class OnCompleteEvent implements SubscriberEvent {} @Data private static class RequestEvent implements SubscriberEvent { @@ -1573,8 +1775,11 @@ public class FanOutRecordsPublisherTest { private Runnable errorAction; private Consumer shardEndAction; - BackpressureAdheringServicePublisher(Consumer action, Integer numOfTimes, - CountDownLatch taskCompletionLatch, Integer initialDemand) { + BackpressureAdheringServicePublisher( + Consumer action, + Integer numOfTimes, + CountDownLatch taskCompletionLatch, + Integer initialDemand) { this(action, numOfTimes, taskCompletionLatch, new Semaphore(initialDemand)); sendCompletionAt = Integer.MAX_VALUE; sendErrorAt = Integer.MAX_VALUE; @@ -1587,8 +1792,8 @@ public class FanOutRecordsPublisherTest { public void run() { for (int i = 1; i <= numOfTimes; ) { demandNotifier.acquireUninterruptibly(); - if(i == sendCompletionAt) { - if(shardEndAction != null) { + if (i == sendCompletionAt) { + if (shardEndAction != null) { shardEndAction.accept(i++); } else { action.accept(i++); @@ -1596,7 +1801,7 @@ public class FanOutRecordsPublisherTest { completeAction.run(); break; } - if(i == sendErrorAt) { + if (i == sendErrorAt) { action.accept(i++); errorAction.run(); break; @@ -1611,7 +1816,8 @@ public class FanOutRecordsPublisherTest { this.completeAction = completeAction; } - public void setShardEndAndCompleteTrigger(Integer sendCompletionAt, Runnable completeAction, Consumer shardEndAction) { + public void setShardEndAndCompleteTrigger( + Integer sendCompletionAt, Runnable completeAction, Consumer shardEndAction) { setCompleteTrigger(sendCompletionAt, completeAction); this.shardEndAction = shardEndAction; } @@ -1627,9 +1833,13 @@ public class FanOutRecordsPublisherTest { } private Record makeRecord(int sequenceNumber) { - SdkBytes buffer = SdkBytes.fromByteArray(new byte[] { 1, 2, 3 }); - return Record.builder().data(buffer).approximateArrivalTimestamp(Instant.now()) - .sequenceNumber(Integer.toString(sequenceNumber)).partitionKey("A").build(); + SdkBytes 
buffer = SdkBytes.fromByteArray(new byte[] {1, 2, 3}); + return Record.builder() + .data(buffer) + .approximateArrivalTimestamp(Instant.now()) + .sequenceNumber(Integer.toString(sequenceNumber)) + .partitionKey("A") + .build(); } private static class KinesisClientRecordMatcher extends TypeSafeDiagnosingMatcher { @@ -1646,23 +1856,25 @@ public class FanOutRecordsPublisherTest { sequenceNumberMatcher = equalTo(expected.sequenceNumber()); approximateArrivalMatcher = equalTo(expected.approximateArrivalTimestamp()); dataMatcher = equalTo(expected.data()); - } @Override protected boolean matchesSafely(KinesisClientRecord item, Description mismatchDescription) { - boolean matches = matchAndDescribe(partitionKeyMatcher, item.partitionKey(), "partitionKey", + boolean matches = + matchAndDescribe(partitionKeyMatcher, item.partitionKey(), "partitionKey", mismatchDescription); + matches &= matchAndDescribe( + sequenceNumberMatcher, item.sequenceNumber(), "sequenceNumber", mismatchDescription); + matches &= matchAndDescribe( + approximateArrivalMatcher, + item.approximateArrivalTimestamp(), + "approximateArrivalTimestamp", mismatchDescription); - matches &= matchAndDescribe(sequenceNumberMatcher, item.sequenceNumber(), "sequenceNumber", - mismatchDescription); - matches &= matchAndDescribe(approximateArrivalMatcher, item.approximateArrivalTimestamp(), - "approximateArrivalTimestamp", mismatchDescription); matches &= matchAndDescribe(dataMatcher, item.data(), "data", mismatchDescription); return matches; } - private boolean matchAndDescribe(Matcher matcher, T value, String field, - Description mismatchDescription) { + private boolean matchAndDescribe( + Matcher matcher, T value, String field, Description mismatchDescription) { if (!matcher.matches(value)) { mismatchDescription.appendText(field).appendText(": "); matcher.describeMismatch(value, mismatchDescription); @@ -1673,13 +1885,16 @@ public class FanOutRecordsPublisherTest { @Override public void describeTo(Description description) { - description.appendText("A kinesis client record with: ").appendText("PartitionKey: ") - .appendDescriptionOf(partitionKeyMatcher).appendText(" SequenceNumber: ") - .appendDescriptionOf(sequenceNumberMatcher).appendText(" Approximate Arrival Time: ") - .appendDescriptionOf(approximateArrivalMatcher).appendText(" Data: ") + description + .appendText("A kinesis client record with: ") + .appendText("PartitionKey: ") + .appendDescriptionOf(partitionKeyMatcher) + .appendText(" SequenceNumber: ") + .appendDescriptionOf(sequenceNumberMatcher) + .appendText(" Approximate Arrival Time: ") + .appendDescriptionOf(approximateArrivalMatcher) + .appendText(" Data: ") .appendDescriptionOf(dataMatcher); } - } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java new file mode 100644 index 00000000..9dcba69a --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2023 Amazon.com, Inc. or its affiliates. + * Licensed under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package software.amazon.kinesis.retrieval.kpl; + +import org.junit.Test; +import software.amazon.kinesis.checkpoint.SentinelCheckpoint; + +import static org.junit.Assert.assertTrue; + +public class ExtendedSequenceNumberTest { + + @Test + public void testSentinelCheckpoints() { + for (final SentinelCheckpoint sentinel : SentinelCheckpoint.values()) { + final ExtendedSequenceNumber esn = new ExtendedSequenceNumber(sentinel.name()); + assertTrue(sentinel.name(), esn.isSentinelCheckpoint()); + + // For backwards-compatibility, sentinels should ignore subsequences + final ExtendedSequenceNumber esnWithSubsequence = new ExtendedSequenceNumber(sentinel.name(), 42L); + assertTrue(sentinel.name(), esnWithSubsequence.isSentinelCheckpoint()); + } + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java index bd3b7047..2528f158 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java @@ -14,16 +14,6 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.core.IsEqual.equalTo; -import static org.junit.Assert.assertNull; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; @@ -33,6 +23,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -41,15 +32,22 @@ import org.mockito.Mock; import org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import org.mockito.stubbing.Answer; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.retrieval.DataFetcherResult; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.NullMetricsFactory; +import software.amazon.kinesis.retrieval.DataFetcherResult; + +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static 
org.hamcrest.core.IsEqual.equalTo; +import static org.junit.Assert.assertNull; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @@ -65,8 +63,10 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @Mock private Supplier<CompletionService<DataFetcherResult>> completionServiceSupplier; + @Mock private DataFetcherResult result; + @Mock private KinesisAsyncClient kinesisClient; @@ -79,7 +79,6 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { private RejectedExecutionHandler rejectedExecutionHandler; private int numberOfRecords = 10; - @Before public void setup() { dataFetcher = spy(new KinesisDataFetcherForTests(kinesisClient, streamName, shardId, numberOfRecords)); @@ -90,11 +89,14 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { TIME_TO_LIVE, TimeUnit.SECONDS, new LinkedBlockingQueue<>(1), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("getrecords-worker-%d").build(), + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("getrecords-worker-%d") + .build(), rejectedExecutionHandler)); completionService = spy(new ExecutorCompletionService<DataFetcherResult>(executorService)); - getRecordsRetrivalStrategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, executorService, - RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, "shardId-0001"); + getRecordsRetrivalStrategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, "shardId-0001"); getRecordsResponse = GetRecordsResponse.builder().build(); when(completionServiceSupplier.get()).thenReturn(completionService); @@ -112,7 +114,8 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @Test public void multiRequestTest() { - ExecutorCompletionService<DataFetcherResult> completionService1 = spy(new ExecutorCompletionService<DataFetcherResult>(executorService)); + ExecutorCompletionService<DataFetcherResult> completionService1 = + spy(new ExecutorCompletionService<DataFetcherResult>(executorService)); when(completionServiceSupplier.get()).thenReturn(completionService1); GetRecordsResponse getRecordsResult = getRecordsRetrivalStrategy.getRecords(numberOfRecords); verify(dataFetcher, atLeast(getLeastNumberOfCalls())).getRecords(); @@ -120,7 +123,8 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { assertThat(getRecordsResult, equalTo(getRecordsResponse)); when(result.accept()).thenReturn(null); - ExecutorCompletionService<DataFetcherResult> completionService2 = spy(new ExecutorCompletionService<DataFetcherResult>(executorService)); + ExecutorCompletionService<DataFetcherResult> completionService2 = + spy(new ExecutorCompletionService<DataFetcherResult>(executorService)); when(completionServiceSupplier.get()).thenReturn(completionService2); getRecordsResult = getRecordsRetrivalStrategy.getRecords(numberOfRecords); assertThat(getRecordsResult, nullValue(GetRecordsResponse.class)); @@ -132,7 +136,9 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @Override public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { Thread.sleep(SLEEP_GET_RECORDS_IN_SECONDS * 1000); - throw ExpiredIteratorException.builder().message("ExpiredIterator").build(); + throw ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build(); } });
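The stub above makes getRecords() sleep past the retry interval and then surface ExpiredIteratorException, and the surrounding tests expect the strategy to rethrow it rather than swallow it. A minimal sketch of the recovery a caller of the strategy would perform, reusing this test's own fields (illustrative, not the KCL implementation):

    try {
        GetRecordsResponse response = getRecordsRetrivalStrategy.getRecords(numberOfRecords);
    } catch (ExpiredIteratorException e) {
        // Re-seed the iterator from the last known position, then retry the call.
        dataFetcher.restartIterator();
    }

@@ -162,8 +168,11 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest 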
{ } private class KinesisDataFetcherForTests extends KinesisDataFetcher { - public KinesisDataFetcherForTests(final KinesisAsyncClient kinesisClient, final String streamName, - final String shardId, final int maxRecords) { + public KinesisDataFetcherForTests( + final KinesisAsyncClient kinesisClient, + final String streamName, + final String shardId, + final int maxRecords) { super(kinesisClient, streamName, shardId, maxRecords, NULL_METRICS_FACTORY); } @@ -178,5 +187,4 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { return result; } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java index 2c7d8fd1..13db2a4d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java @@ -14,18 +14,6 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -39,11 +27,22 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.retrieval.DataFetcherResult; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * */ @@ -52,18 +51,25 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { private static final long RETRY_GET_RECORDS_IN_SECONDS = 5; private static final String SHARD_ID = "ShardId-0001"; + @Mock private KinesisDataFetcher dataFetcher; + @Mock private ExecutorService executorService; + @Mock private Supplier<CompletionService<DataFetcherResult>> completionServiceSupplier; + @Mock private CompletionService<DataFetcherResult> completionService; + @Mock private Future<DataFetcherResult> successfulFuture; + @Mock private Future<DataFetcherResult> blockedFuture; + @Mock private DataFetcherResult dataFetcherResult; @@ -80,8 +86,8 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test public void testSingleSuccessfulRequestFuture() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, 
completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(false); when(completionService.submit(any())).thenReturn(successfulFuture); @@ -101,8 +107,8 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test public void testBlockedAndSuccessfulFuture() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(false); when(completionService.submit(any())).thenReturn(blockedFuture).thenReturn(successfulFuture); @@ -126,9 +132,9 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { } @Test(expected = IllegalStateException.class) - public void testStrategyIsShutdown() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + public void testStrategyIsShutdown() { + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(true); @@ -137,12 +143,18 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test public void testPoolOutOfResources() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(false); - when(completionService.submit(any())).thenReturn(blockedFuture).thenThrow(new RejectedExecutionException("Rejected!")).thenReturn(successfulFuture); - when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(null).thenReturn(successfulFuture); + when(completionService.submit(any())) + .thenReturn(blockedFuture) + .thenThrow(new RejectedExecutionException("Rejected!")) + .thenReturn(successfulFuture); + when(completionService.poll(anyLong(), any())) + .thenReturn(null) + .thenReturn(null) + .thenReturn(successfulFuture); when(successfulFuture.get()).thenReturn(dataFetcherResult); when(successfulFuture.cancel(anyBoolean())).thenReturn(false); when(blockedFuture.cancel(anyBoolean())).thenReturn(true); @@ -156,21 +168,23 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { verify(successfulFuture).cancel(eq(true)); verify(blockedFuture).cancel(eq(true)); - assertThat(actualResult, equalTo(expectedResponses)); } - - @Test (expected = ExpiredIteratorException.class) + + @Test(expected = ExpiredIteratorException.class) public void testExpiredIteratorExceptionCase() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new 
AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); Future successfulFuture2 = mock(Future.class); when(executorService.isShutdown()).thenReturn(false); when(completionService.submit(any())).thenReturn(successfulFuture, successfulFuture2); when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(successfulFuture); - when(successfulFuture.get()).thenThrow(new ExecutionException(ExpiredIteratorException.builder().message("ExpiredException").build())); - + when(successfulFuture.get()) + .thenThrow(new ExecutionException(ExpiredIteratorException.builder() + .message("ExpiredException") + .build())); + try { strategy.getRecords(10); } finally { @@ -181,5 +195,4 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { verify(successfulFuture2).cancel(eq(true)); } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java index 2e09f34a..b3cd0c2a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java @@ -14,21 +14,6 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.isA; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -50,7 +35,6 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ChildShard; @@ -75,6 +59,21 @@ import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.CoreMatchers.isA; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * Unit tests 
for KinesisDataFetcher. */ @@ -83,20 +82,22 @@ public class KinesisDataFetcherTest { private static final int MAX_RECORDS = 1; private static final String STREAM_NAME = "streamName"; private static final String SHARD_ID = "shardId-1"; - private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(1000)); + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000)); private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); private KinesisDataFetcher kinesisDataFetcher; @Mock private KinesisAsyncClient kinesisClient; + @Mock private CompletableFuture<GetRecordsResponse> getRecordsResponseFuture; + @Mock private CompletableFuture<GetShardIteratorResponse> getShardIteratorResponseFuture; @@ -105,8 +106,8 @@ public class KinesisDataFetcherTest { @Before public void setup() { - kinesisDataFetcher = new KinesisDataFetcher(kinesisClient, STREAM_NAME, SHARD_ID, MAX_RECORDS, - NULL_METRICS_FACTORY); + kinesisDataFetcher = + new KinesisDataFetcher(kinesisClient, STREAM_NAME, SHARD_ID, MAX_RECORDS, NULL_METRICS_FACTORY); } /** @@ -114,8 +115,8 @@ */ @Test public final void testInitializeLatest() throws Exception { - testInitializeAndFetch(ShardIteratorType.LATEST.toString(), ShardIteratorType.LATEST.toString(), - INITIAL_POSITION_LATEST); + testInitializeAndFetch( + ShardIteratorType.LATEST.toString(), ShardIteratorType.LATEST.toString(), INITIAL_POSITION_LATEST); } /** @@ -123,7 +124,9 @@ */ @Test public final void testInitializeTimeZero() throws Exception { - testInitializeAndFetch(ShardIteratorType.TRIM_HORIZON.toString(), ShardIteratorType.TRIM_HORIZON.toString(), + testInitializeAndFetch( + ShardIteratorType.TRIM_HORIZON.toString(), + ShardIteratorType.TRIM_HORIZON.toString(), INITIAL_POSITION_TRIM_HORIZON); } @@ -132,7 +135,9 @@ */ @Test public final void testInitializeAtTimestamp() throws Exception { - testInitializeAndFetch(ShardIteratorType.AT_TIMESTAMP.toString(), ShardIteratorType.AT_TIMESTAMP.toString(), + testInitializeAndFetch( + ShardIteratorType.AT_TIMESTAMP.toString(), + ShardIteratorType.AT_TIMESTAMP.toString(), INITIAL_POSITION_AT_TIMESTAMP); } @@ -154,8 +159,8 @@ } private CompletableFuture<GetShardIteratorResponse> makeGetShardIteratorResponse(String shardIterator) { - return CompletableFuture - .completedFuture(GetShardIteratorResponse.builder().shardIterator(shardIterator).build()); + return CompletableFuture.completedFuture( + GetShardIteratorResponse.builder().shardIterator(shardIterator).build()); }
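The helper above wraps the response in an already-completed future, which is how these tests stub the v2 async client without touching the network. Typical usage, mirroring the stubbing that appears later in this same file (the iterator value is illustrative):

    when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class)))
            .thenReturn(makeGetShardIteratorResponse("TestShardIterator"));
    kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST);

@Test @@ -166,8 +171,8 @@ public class KinesisDataFetcherTest { final String seqA = "123";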
final String seqB = "456"; - ArgumentCaptor<GetShardIteratorRequest> shardIteratorRequestCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + ArgumentCaptor<GetShardIteratorRequest> shardIteratorRequestCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(iteratorA)) @@ -197,24 +202,30 @@ } private GetShardIteratorRequest makeGetShardIteratorRequest(String shardIteratorType) { - return GetShardIteratorRequest.builder().shardIteratorType(shardIteratorType).streamName(STREAM_NAME) - .shardId(SHARD_ID).build(); + return GetShardIteratorRequest.builder() + .shardIteratorType(shardIteratorType) + .streamName(STREAM_NAME) + .shardId(SHARD_ID) + .build(); } @Test - public void testAdvanceIteratorToTrimHorizonLatestAndAtTimestamp(){ - final ArgumentCaptor<GetShardIteratorRequest> requestCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + public void testAdvanceIteratorToTrimHorizonLatestAndAtTimestamp() { + final ArgumentCaptor<GetShardIteratorRequest> requestCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final String iteratorHorizon = "TRIM_HORIZON"; final String iteratorLatest = "LATEST"; final String iteratorAtTimestamp = "AT_TIMESTAMP"; - final Map<ShardIteratorType, GetShardIteratorRequest> requestsMap = Arrays - .stream(new String[] { iteratorHorizon, iteratorLatest, iteratorAtTimestamp }) + final Map<ShardIteratorType, GetShardIteratorRequest> requestsMap = Arrays.stream( + new String[] {iteratorHorizon, iteratorLatest, iteratorAtTimestamp}) .map(this::makeGetShardIteratorRequest) .collect(Collectors.toMap(r -> ShardIteratorType.valueOf(r.shardIteratorTypeAsString()), r -> r)); GetShardIteratorRequest tsReq = requestsMap.get(ShardIteratorType.AT_TIMESTAMP); - requestsMap.put(ShardIteratorType.AT_TIMESTAMP, - tsReq.toBuilder().timestamp(INITIAL_POSITION_AT_TIMESTAMP.getTimestamp().toInstant()).build()); + requestsMap.put( + ShardIteratorType.AT_TIMESTAMP, + tsReq.toBuilder() + .timestamp(INITIAL_POSITION_AT_TIMESTAMP.getTimestamp().toInstant()) + .build()); when(kinesisClient.getShardIterator(requestCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(iteratorHorizon)) @@ -241,19 +252,22 @@ } private GetRecordsRequest makeGetRecordsRequest(String shardIterator) { - return GetRecordsRequest.builder().shardIterator(shardIterator).limit(MAX_RECORDS).build(); + return GetRecordsRequest.builder() + .shardIterator(shardIterator) + .limit(MAX_RECORDS) + .build(); } @Test public void testGetRecordsWithResourceNotFoundException() throws Exception { - final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor<GetRecordsRequest> recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); // Set up arguments used by proxy final String nextIterator = "TestShardIterator"; - final GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest( - ShardIteratorType.LATEST.name()); + final GetShardIteratorRequest expectedIteratorRequest = + makeGetShardIteratorRequest(ShardIteratorType.LATEST.name()); final GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(nextIterator); final CompletableFuture<GetRecordsResponse> future = mock(CompletableFuture.class); @@ -262,21 +276,27 @@ when(kinesisClient.getShardIterator(iteratorCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(nextIterator)); 
when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(future); - when(future.get(anyLong(), any(TimeUnit.class))).thenThrow( - new ExecutionException(ResourceNotFoundException.builder().message("Test Exception").build())); + when(future.get(anyLong(), any(TimeUnit.class))) + .thenThrow(new ExecutionException(ResourceNotFoundException.builder() + .message("Test Exception") + .build())); // Create data fetcher and initialize it with latest type checkpoint kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); try { // Call records of dataFetcher which will throw an exception getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); } finally { // Test shard has reached the end assertTrue("Shard should reach the end", kinesisDataFetcher.isShardEndReached()); - assertEquals(expectedIteratorRequest.startingSequenceNumber(), iteratorCaptor.getValue().startingSequenceNumber()); - assertEquals(expectedRecordsRequest.shardIterator(), recordsCaptor.getValue().shardIterator()); + assertEquals( + expectedIteratorRequest.startingSequenceNumber(), + iteratorCaptor.getValue().startingSequenceNumber()); + assertEquals( + expectedRecordsRequest.shardIterator(), + recordsCaptor.getValue().shardIterator()); } } @@ -285,19 +305,20 @@ expectedExceptionRule.expect(SdkException.class); expectedExceptionRule.expectMessage("Test Exception"); - CompletableFuture<GetShardIteratorResponse> getShardIteratorFuture = CompletableFuture - .completedFuture(GetShardIteratorResponse.builder().shardIterator("test").build()); + CompletableFuture<GetShardIteratorResponse> getShardIteratorFuture = CompletableFuture.completedFuture( + GetShardIteratorResponse.builder().shardIterator("test").build()); // Set up proxy mock methods when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorFuture); when(kinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn(getRecordsResponseFuture); when(getRecordsResponseFuture.get(anyLong(), any(TimeUnit.class))) - .thenThrow(new ExecutionException(SdkException.builder().message("Test Exception").build())); + .thenThrow(new ExecutionException( + SdkException.builder().message("Test Exception").build())); // Create data fetcher and initialize it with latest type checkpoint kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); // Call records of dataFetcher which will throw an exception getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); }
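The two tests above pin down how the fetcher handles a failed future from the async client: the checked ExecutionException is unwrapped, a ResourceNotFoundException cause marks the shard as ended, and other SdkException causes are rethrown to the caller. A minimal sketch of that pattern, assuming a CompletableFuture<GetRecordsResponse> named future and a timeout in milliseconds (a hypothetical helper, not the KCL source):

    try {
        return future.get(timeoutMillis, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof ResourceNotFoundException) {
            isShardEndReached = true; // the stream or shard no longer exists
        } else if (e.getCause() instanceof SdkException) {
            throw (SdkException) e.getCause(); // surface the service error
        }
        throw SdkException.create("getRecords failed", e.getCause());
    }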
@@ -306,11 +327,11 @@ @Test public void testNonNullGetRecords() throws Exception { final String nextIterator = "TestIterator"; - final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor<GetRecordsRequest> recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); - final GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest( - ShardIteratorType.LATEST.name()); + final GetShardIteratorRequest expectedIteratorRequest = + makeGetShardIteratorRequest(ShardIteratorType.LATEST.name()); final GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(nextIterator); final CompletableFuture<GetRecordsResponse> future = mock(CompletableFuture.class); @@ -318,24 +339,32 @@ when(kinesisClient.getShardIterator(iteratorCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(nextIterator)); when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(future); - when(future.get(anyLong(), any(TimeUnit.class))).thenThrow( - new ExecutionException(ResourceNotFoundException.builder().message("Test Exception").build())); + when(future.get(anyLong(), any(TimeUnit.class))) + .thenThrow(new ExecutionException(ResourceNotFoundException.builder() + .message("Test Exception") + .build())); kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); DataFetcherResult dataFetcherResult = kinesisDataFetcher.getRecords(); assertNotNull(dataFetcherResult); - assertEquals(expectedIteratorRequest.startingSequenceNumber(), iteratorCaptor.getValue().startingSequenceNumber()); - assertEquals(expectedRecordsRequest.shardIterator(), recordsCaptor.getValue().shardIterator()); + assertEquals( + expectedIteratorRequest.startingSequenceNumber(), + iteratorCaptor.getValue().startingSequenceNumber()); + assertEquals( + expectedRecordsRequest.shardIterator(), recordsCaptor.getValue().shardIterator()); } private CompletableFuture<GetRecordsResponse> makeGetRecordsResponse(String nextIterator, List<Record> records) { List<ChildShard> childShards = new ArrayList<>(); - if(nextIterator == null) { + if (nextIterator == null) { childShards = createChildShards(); } - return CompletableFuture.completedFuture(GetRecordsResponse.builder().nextShardIterator(nextIterator) - .records(CollectionUtils.isNullOrEmpty(records) ? Collections.emptyList() : records).childShards(childShards).build()); + return CompletableFuture.completedFuture(GetRecordsResponse.builder() + .nextShardIterator(nextIterator) + .records(CollectionUtils.isNullOrEmpty(records) ? 
Collections.emptyList() : records) + .childShards(childShards) + .build()); } private List<ChildShard> createChildShards() { @@ -343,15 +372,15 @@ List<String> parentShards = new ArrayList<>(); parentShards.add(SHARD_ID); ChildShard leftChild = ChildShard.builder() - .shardId("Shard-2") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); + .shardId("Shard-2") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + .build(); ChildShard rightChild = ChildShard.builder() - .shardId("Shard-3") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); + .shardId("Shard-3") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); childShards.add(leftChild); childShards.add(rightChild); return childShards; @@ -359,26 +388,33 @@ @Test public void testFetcherDoesNotAdvanceWithoutAccept() throws InterruptedException, ExecutionException { - final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor<GetRecordsRequest> recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); final String initialIterator = "InitialIterator"; final String nextIterator1 = "NextIteratorOne"; final String nextIterator2 = "NextIteratorTwo"; final CompletableFuture<GetRecordsResponse> nonAdvancingResult1 = makeGetRecordsResponse(initialIterator, null); final CompletableFuture<GetRecordsResponse> nonAdvancingResult2 = makeGetRecordsResponse(nextIterator1, null); - final CompletableFuture<GetRecordsResponse> finalNonAdvancingResult = makeGetRecordsResponse(nextIterator2, - null); + final CompletableFuture<GetRecordsResponse> finalNonAdvancingResult = + makeGetRecordsResponse(nextIterator2, null); final CompletableFuture<GetRecordsResponse> advancingResult1 = makeGetRecordsResponse(nextIterator1, null); final CompletableFuture<GetRecordsResponse> advancingResult2 = makeGetRecordsResponse(nextIterator2, null); final CompletableFuture<GetRecordsResponse> finalAdvancingResult = makeGetRecordsResponse(null, null); when(kinesisClient.getShardIterator(iteratorCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(initialIterator)); - when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(nonAdvancingResult1, advancingResult1, - nonAdvancingResult2, advancingResult2, finalNonAdvancingResult, finalAdvancingResult); + when(kinesisClient.getRecords(recordsCaptor.capture())) + .thenReturn( + nonAdvancingResult1, + advancingResult1, + nonAdvancingResult2, + advancingResult2, + finalNonAdvancingResult, + finalAdvancingResult); - kinesisDataFetcher.initialize("TRIM_HORIZON", + kinesisDataFetcher.initialize( + "TRIM_HORIZON", InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON)); assertNoAdvance(nonAdvancingResult1.get(), initialIterator); @@ -412,8 +448,8 @@ @Ignore public void testRestartIterator() throws Exception { GetRecordsResponse getRecordsResult = mock(GetRecordsResponse.class); - GetRecordsResponse restartGetRecordsResponse = makeGetRecordsResponse(null, null).get(anyLong(), - any(TimeUnit.class)); + GetRecordsResponse restartGetRecordsResponse = + makeGetRecordsResponse(null, null).get(anyLong(), any(TimeUnit.class)); Record record = mock(Record.class); final String nextShardIterator = "NextShardIterator"; final String sequenceNumber = "SequenceNumber"; @@ -437,8 +473,8 @@ public class 
KinesisDataFetcherTest { final ArgumentCaptor<GetShardIteratorRequest> shardIteratorRequestCaptor = ArgumentCaptor.forClass(GetShardIteratorRequest.class); - when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())). - thenReturn(makeGetShardIteratorResponse(iterator)); + when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())) + .thenReturn(makeGetShardIteratorResponse(iterator)); kinesisDataFetcher.initialize(sequenceNumber, INITIAL_POSITION_LATEST); kinesisDataFetcher.restartIterator(); @@ -448,11 +484,14 @@ public class KinesisDataFetcherTest { final List<GetShardIteratorRequest> shardIteratorRequests = shardIteratorRequestCaptor.getAllValues(); assertEquals(3, shardIteratorRequests.size()); - assertEquals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), + assertEquals( + ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), shardIteratorRequests.get(0).shardIteratorTypeAsString()); - assertEquals(ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), + assertEquals( + ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), shardIteratorRequests.get(1).shardIteratorTypeAsString()); - assertEquals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), + assertEquals( + ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), shardIteratorRequests.get(2).shardIteratorTypeAsString()); } @@ -483,8 +522,8 @@ public class KinesisDataFetcherTest { expectedExceptionRule.expectCause(isA(TimeoutException.class)); expectedExceptionRule.expectMessage("Timeout"); - CompletableFuture<GetShardIteratorResponse> getShardIteratorFuture = CompletableFuture - .completedFuture(GetShardIteratorResponse.builder().shardIterator("test").build()); + CompletableFuture<GetShardIteratorResponse> getShardIteratorFuture = CompletableFuture.completedFuture( + GetShardIteratorResponse.builder().shardIterator("test").build()); // Set up proxy mock methods when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorFuture); @@ -493,15 +532,15 @@ // Create data fetcher and initialize it with latest type checkpoint kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); // Call records of dataFetcher which will throw an exception getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); } - private DataFetcherResult assertAdvanced(GetRecordsResponse expectedResult, String previousValue, - String nextValue) { + private DataFetcherResult assertAdvanced( + GetRecordsResponse expectedResult, String previousValue, String nextValue) { DataFetcherResult acceptResult = kinesisDataFetcher.getRecords(); assertEquals(expectedResult, acceptResult.getResult()); @@ -527,19 +566,25 @@ public class KinesisDataFetcherTest { return noAcceptResult; } - private void testInitializeAndFetch(final String iteratorType, final String seqNo, - final InitialPositionInStreamExtended initialPositionInStream) throws Exception { - final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + private void testInitializeAndFetch( + final String iteratorType, + final String seqNo, + final InitialPositionInStreamExtended initialPositionInStream) + throws Exception { + final ArgumentCaptor<GetShardIteratorRequest> iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor<GetRecordsRequest> recordsCaptor = 
ArgumentCaptor.forClass(GetRecordsRequest.class); final String iterator = "foo"; final List<Record> expectedRecords = Collections.emptyList(); GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest(iteratorType); if (iteratorType.equals(ShardIteratorType.AT_TIMESTAMP.toString())) { expectedIteratorRequest = expectedIteratorRequest.toBuilder() - .timestamp(initialPositionInStream.getTimestamp().toInstant()).build(); + .timestamp(initialPositionInStream.getTimestamp().toInstant()) + .build(); } else if (iteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString())) { - expectedIteratorRequest = expectedIteratorRequest.toBuilder().startingSequenceNumber(seqNo).build(); + expectedIteratorRequest = expectedIteratorRequest.toBuilder() + .startingSequenceNumber(seqNo) + .build(); } when(kinesisClient.getShardIterator(iteratorCaptor.capture())) @@ -551,13 +596,14 @@ Checkpointer checkpoint = mock(Checkpointer.class); when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqNo)); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); kinesisDataFetcher.initialize(seqNo, initialPositionInStream); - assertEquals(expectedRecords, getRecordsRetrievalStrategy.getRecords(MAX_RECORDS).records()); + assertEquals( + expectedRecords, + getRecordsRetrievalStrategy.getRecords(MAX_RECORDS).records()); verify(kinesisClient, times(1)).getShardIterator(any(GetShardIteratorRequest.class)); verify(kinesisClient, times(1)).getRecords(any(GetRecordsRequest.class)); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java new file mode 100644 index 00000000..572bc0f0 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java @@ -0,0 +1,51 @@ +package software.amazon.kinesis.retrieval.polling; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +@RunWith(MockitoJUnitRunner.class) +public class PollingConfigTest { + + private static final String STREAM_NAME = PollingConfigTest.class.getSimpleName(); + + @Mock + private KinesisAsyncClient mockKinesisClient; + + private PollingConfig config; + + @Before + public void setUp() { + config = new PollingConfig(mockKinesisClient); + } + + @Test + public void testValidState() { + assertNull(config.streamName()); + + config.validateState(true); + config.validateState(false); + + config.streamName(STREAM_NAME); + config.validateState(false); + assertEquals(STREAM_NAME, config.streamName()); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidStateMultiWithStreamName() { + config.streamName(STREAM_NAME); + + config.validateState(true); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidRecordLimit() { + config.maxRecords(PollingConfig.DEFAULT_MAX_RECORDS + 1); + } +}
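The new PollingConfigTest pins down two validation rules: validateState(true) (multi-stream mode) rejects a configured stream name, and maxRecords may not exceed PollingConfig.DEFAULT_MAX_RECORDS. A minimal usage sketch of the rules under test (the client variable is illustrative):

    PollingConfig config = new PollingConfig(kinesisAsyncClient);
    config.validateState(true);      // valid: multi-stream mode, no stream name set
    config.streamName("my-stream");
    config.validateState(false);     // valid: single-stream mode with a stream name
    config.validateState(true);      // throws IllegalArgumentException: name + multi-stream

diff --git 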
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java index 5d757a6c..c5340f97 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java @@ -15,24 +15,6 @@ package software.amazon.kinesis.retrieval.polling; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.utils.BlockingUtils.blockUntilConditionSatisfied; -import static software.amazon.kinesis.utils.BlockingUtils.blockUntilRecordsAvailable; - import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -40,6 +22,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import lombok.extern.slf4j.Slf4j; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -51,17 +34,14 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import org.mockito.stubbing.Answer; - import software.amazon.awssdk.core.SdkBytes; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; import software.amazon.awssdk.services.kinesis.model.Record; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.NullMetricsFactory; @@ -70,6 +50,23 @@ import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static 
org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.utils.BlockingUtils.blockUntilConditionSatisfied; +import static software.amazon.kinesis.utils.BlockingUtils.blockUntilRecordsAvailable; + /** * These are the integration tests for the PrefetchRecordsPublisher class. */ @@ -96,8 +93,10 @@ public class PrefetchRecordsPublisherIntegrationTest { @Mock private KinesisAsyncClient kinesisClient; + @Mock private ExtendedSequenceNumber extendedSequenceNumber; + @Mock private InitialPositionInStreamExtended initialPosition; @@ -110,10 +109,14 @@ public class PrefetchRecordsPublisherIntegrationTest { CompletableFuture future = mock(CompletableFuture.class); when(extendedSequenceNumber.sequenceNumber()).thenReturn("LATEST"); - when(future.get(anyLong(), any(TimeUnit.class))).thenReturn(GetShardIteratorResponse.builder().shardIterator("TestIterator").build()); + when(future.get(anyLong(), any(TimeUnit.class))) + .thenReturn(GetShardIteratorResponse.builder() + .shardIterator("TestIterator") + .build()); when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(future); - getRecordsCache = new PrefetchRecordsPublisher(MAX_SIZE, + getRecordsCache = new PrefetchRecordsPublisher( + MAX_SIZE, MAX_BYTE_SIZE, MAX_RECORDS_COUNT, MAX_RECORDS_PER_CALL, @@ -131,14 +134,16 @@ public class PrefetchRecordsPublisherIntegrationTest { getRecordsCache.start(extendedSequenceNumber, initialPosition); sleep(IDLE_MILLIS_BETWEEN_CALLS); - ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertTrue(processRecordsInput1.records().isEmpty()); assertEquals(processRecordsInput1.millisBehindLatest(), new Long(1000)); assertNotNull(processRecordsInput1.cacheEntryTime()); - ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertNotEquals(processRecordsInput1, processRecordsInput2); @@ -149,11 +154,14 @@ public class PrefetchRecordsPublisherIntegrationTest { getRecordsCache.start(extendedSequenceNumber, initialPosition); sleep(MAX_SIZE * IDLE_MILLIS_BETWEEN_CALLS); - assertEquals(getRecordsCache.getPublisherSession().prefetchRecordsQueue().size(), MAX_SIZE); + assertEquals( + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size(), MAX_SIZE); - ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); - ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertNotEquals(processRecordsInput1, processRecordsInput2); @@ -163,9 +171,10 @@ public class PrefetchRecordsPublisherIntegrationTest { @Test public void testDifferentShardCaches() { final 
ExecutorService executorService2 = spy(Executors.newFixedThreadPool(1)); - final KinesisDataFetcher kinesisDataFetcher = spy(new KinesisDataFetcher(kinesisClient, streamName, shardId, MAX_RECORDS_PER_CALL, NULL_METRICS_FACTORY)); + final KinesisDataFetcher kinesisDataFetcher = spy( + new KinesisDataFetcher(kinesisClient, streamName, shardId, MAX_RECORDS_PER_CALL, NULL_METRICS_FACTORY)); final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy2 = - spy(new AsynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher, 5 , 5, shardId)); + spy(new AsynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher, 5, 5, shardId)); final PrefetchRecordsPublisher recordsPublisher2 = new PrefetchRecordsPublisher( MAX_SIZE, MAX_BYTE_SIZE, @@ -206,23 +215,28 @@ public class PrefetchRecordsPublisherIntegrationTest { recordsPublisher2.shutdown(); sleep(100L); verify(executorService2).shutdownNow(); -// verify(getRecordsRetrievalStrategy2).shutdown(); + // verify(getRecordsRetrievalStrategy2).shutdown(); } @Test public void testExpiredIteratorException() { - when(dataFetcher.getRecords()).thenAnswer(new Answer() { - @Override - public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { - throw ExpiredIteratorException.builder().message("ExpiredIterator").build(); - } - }).thenCallRealMethod(); + when(dataFetcher.getRecords()) + .thenAnswer(new Answer() { + @Override + public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { + throw ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build(); + } + }) + .thenCallRealMethod(); doNothing().when(dataFetcher).restartIterator(); getRecordsCache.start(extendedSequenceNumber, initialPosition); sleep(IDLE_MILLIS_BETWEEN_CALLS); - ProcessRecordsInput processRecordsInput = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertNotNull(processRecordsInput); @@ -233,17 +247,26 @@ public class PrefetchRecordsPublisherIntegrationTest { @Test public void testExpiredIteratorExceptionWithInnerRestartIteratorException() { when(dataFetcher.getRecords()) - .thenThrow(ExpiredIteratorException.builder().message("ExpiredIterator").build()) + .thenThrow(ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build()) .thenCallRealMethod() - .thenThrow(ExpiredIteratorException.builder().message("ExpiredIterator").build()) + .thenThrow(ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build()) .thenCallRealMethod(); doThrow(IllegalStateException.class).when(dataFetcher).restartIterator(); getRecordsCache.start(extendedSequenceNumber, initialPosition); - final boolean conditionSatisfied = blockUntilConditionSatisfied(() -> - getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 5000); + final boolean conditionSatisfied = blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 5000); Assert.assertTrue(conditionSatisfied); // Asserts the exception was only thrown once for restartIterator verify(dataFetcher, times(2)).restartIterator(); @@ -258,26 +281,32 @@ public class PrefetchRecordsPublisherIntegrationTest { getRecordsCache.shutdown(); sleep(100L); verify(executorService).shutdown(); -// verify(getRecordsRetrievalStrategy).shutdown(); + // 
verify(getRecordsRetrievalStrategy).shutdown(); } private void sleep(long millis) { try { Thread.sleep(millis); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } } private class KinesisDataFetcherForTest extends KinesisDataFetcher { - public KinesisDataFetcherForTest(final KinesisAsyncClient kinesisClient, - final String streamName, - final String shardId, - final int maxRecords) { + public KinesisDataFetcherForTest( + final KinesisAsyncClient kinesisClient, + final String streamName, + final String shardId, + final int maxRecords) { super(kinesisClient, streamName, shardId, maxRecords, NULL_METRICS_FACTORY); } @Override public DataFetcherResult getRecords() { - GetRecordsResponse getRecordsResult = GetRecordsResponse.builder().records(new ArrayList<>(records)).nextShardIterator(nextShardIterator).millisBehindLatest(1000L).build(); + GetRecordsResponse getRecordsResult = GetRecordsResponse.builder() + .records(new ArrayList<>(records)) + .nextShardIterator(nextShardIterator) + .millisBehindLatest(1000L) + .build(); return new AdvancingResult(getRecordsResult); } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java index 55d76432..a046e6b9 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java @@ -15,32 +15,6 @@ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.utils.BlockingUtils.blockUntilConditionSatisfied; -import static software.amazon.kinesis.utils.BlockingUtils.blockUntilRecordsAvailable; -import static software.amazon.kinesis.utils.ProcessRecordsInputMatcher.eqProcessRecordsInput; - import java.time.Duration; import java.time.Instant; import java.util.ArrayList; @@ -59,6 +33,9 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; +import io.reactivex.rxjava3.core.Flowable; +import io.reactivex.rxjava3.schedulers.Schedulers; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.junit.After; import org.junit.Assert; @@ -66,26 +43,23 @@ import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; import 
org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import org.mockito.stubbing.Answer; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; - -import io.reactivex.rxjava3.core.Flowable; -import io.reactivex.rxjava3.schedulers.Schedulers; -import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.InvalidArgumentException; import software.amazon.awssdk.services.kinesis.model.Record; import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.common.RequestDetails; -import software.amazon.kinesis.leases.ShardObjectHelper; import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.leases.ShardObjectHelper; import software.amazon.kinesis.lifecycle.ShardConsumerNotifyingSubscriber; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.metrics.NullMetricsFactory; @@ -95,6 +69,33 @@ import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import software.amazon.kinesis.utils.BlockingUtils; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.utils.BlockingUtils.blockUntilConditionSatisfied; +import static software.amazon.kinesis.utils.ProcessRecordsInputMatcher.eqProcessRecordsInput; /** * Test class for the PrefetchRecordsPublisher class. 
@@ -107,16 +108,19 @@ public class PrefetchRecordsPublisherTest { private static final int MAX_RECORDS_PER_CALL = 10000; private static final int MAX_SIZE = 5; private static final int MAX_RECORDS_COUNT = 15000; - private static final long IDLE_MILLIS_BETWEEN_CALLS = 0L; - private static final long AWAIT_TERMINATION_TIMEOUT = 1L; private static final String NEXT_SHARD_ITERATOR = "testNextShardIterator"; + private static final long DEFAULT_TIMEOUT_MILLIS = Duration.ofSeconds(1).toMillis(); + @Mock private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; + @Mock private DataFetcher dataFetcher; + @Mock private InitialPositionInStreamExtended initialPosition; + @Mock private ExtendedSequenceNumber sequenceNumber; @@ -124,31 +128,22 @@ public class PrefetchRecordsPublisherTest { private ExecutorService executorService; private LinkedBlockingQueue spyQueue; private PrefetchRecordsPublisher getRecordsCache; - private String operation = "ProcessTask"; private GetRecordsResponse getRecordsResponse; private Record record; - private RequestDetails requestDetails; @Before public void setup() { when(getRecordsRetrievalStrategy.dataFetcher()).thenReturn(dataFetcher); when(dataFetcher.getStreamIdentifier()).thenReturn(StreamIdentifier.singleStreamInstance("testStream")); executorService = spy(Executors.newFixedThreadPool(1)); - getRecordsCache = new PrefetchRecordsPublisher( - MAX_SIZE, - 3 * SIZE_1_MB, - MAX_RECORDS_COUNT, - MAX_RECORDS_PER_CALL, - getRecordsRetrievalStrategy, - executorService, - IDLE_MILLIS_BETWEEN_CALLS, - new NullMetricsFactory(), - operation, - "shardId", - AWAIT_TERMINATION_TIMEOUT); + getRecordsCache = createPrefetchRecordsPublisher(0L); spyQueue = spy(getRecordsCache.getPublisherSession().prefetchRecordsQueue()); records = spy(new ArrayList<>()); - getRecordsResponse = GetRecordsResponse.builder().records(records).nextShardIterator(NEXT_SHARD_ITERATOR).childShards(new ArrayList<>()).build(); + getRecordsResponse = GetRecordsResponse.builder() + .records(records) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .childShards(Collections.emptyList()) + .build(); when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(getRecordsResponse); } @@ -158,24 +153,45 @@ public class PrefetchRecordsPublisherTest { getRecordsCache.start(sequenceNumber, initialPosition); getRecordsCache.start(sequenceNumber, initialPosition); getRecordsCache.start(sequenceNumber, initialPosition); - verify(dataFetcher, times(1)).initialize(any(ExtendedSequenceNumber.class), any()); + verify(dataFetcher).initialize(any(ExtendedSequenceNumber.class), any()); } @Test public void testPrefetchPublisherInternalStateNotModifiedWhenPrefetcherThreadStartFails() { - doThrow(new RejectedExecutionException()).doThrow(new RejectedExecutionException()).doCallRealMethod() - .when(executorService).execute(any()); + doThrow(new RejectedExecutionException()) + .doThrow(new RejectedExecutionException()) + .doCallRealMethod() + .when(executorService) + .execute(any()); // Initialize try 1 tryPrefetchCacheStart(); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verifyInternalState(0); // Initialize try 2 tryPrefetchCacheStart(); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> 
getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verifyInternalState(0); // Initialize try 3 tryPrefetchCacheStart(); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verifyInternalState(MAX_SIZE); verify(dataFetcher, times(3)).initialize(any(ExtendedSequenceNumber.class), any()); } @@ -189,7 +205,9 @@ public class PrefetchRecordsPublisherTest { } private void verifyInternalState(int queueSize) { - Assert.assertTrue(getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == queueSize); + assertEquals( + queueSize, + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); } @Test @@ -198,13 +216,11 @@ public class PrefetchRecordsPublisherTest { when(records.size()).thenReturn(1000); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + final List expectedRecords = + records.stream().map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); getRecordsCache.start(sequenceNumber, initialPosition); - ProcessRecordsInput result = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, - "shardId"), 1000L) - .processRecordsInput(); + ProcessRecordsInput result = blockUntilRecordsAvailable().processRecordsInput(); assertEquals(expectedRecords, result.records()); assertEquals(new ArrayList<>(), result.childShards()); @@ -215,71 +231,42 @@ public class PrefetchRecordsPublisherTest { @Test(expected = RuntimeException.class) public void testGetRecordsWithInitialFailures_LessThanRequiredWait_Throws() { - // Create a new PrefetchRecordsPublisher with 1s idle time between get calls - getRecordsCache = new PrefetchRecordsPublisher( - MAX_SIZE, - 3 * SIZE_1_MB, - MAX_RECORDS_COUNT, - MAX_RECORDS_PER_CALL, - getRecordsRetrievalStrategy, - executorService, - 1000, - new NullMetricsFactory(), - operation, - "shardId", - AWAIT_TERMINATION_TIMEOUT); + getRecordsCache = createPrefetchRecordsPublisher(Duration.ofSeconds(1).toMillis()); // Setup the retrieval strategy to fail initial calls before succeeding - when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenThrow(new - RetryableRetrievalException("Timed out")).thenThrow(new - RetryableRetrievalException("Timed out again")).thenReturn(getRecordsResponse); + when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))) + .thenThrow(new RetryableRetrievalException("Timed out")) + .thenThrow(new RetryableRetrievalException("Timed out again")) + .thenReturn(getRecordsResponse); record = Record.builder().data(createByteBufferWithSize(SIZE_512_KB)).build(); when(records.size()).thenReturn(1000); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); - getRecordsCache.start(sequenceNumber, initialPosition); - ProcessRecordsInput result = null; // Setup timeout to be less than what the PrefetchRecordsPublisher will need based on the idle time between // get calls to validate exception is thrown - result = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, - "shardId"), 1000L) - .processRecordsInput(); + blockUntilRecordsAvailable(); } @Test public void testGetRecordsWithInitialFailures_AdequateWait_Success() { - // Create a new PrefetchRecordsPublisher with 1s idle time between get 
calls - getRecordsCache = new PrefetchRecordsPublisher( - MAX_SIZE, - 3 * SIZE_1_MB, - MAX_RECORDS_COUNT, - MAX_RECORDS_PER_CALL, - getRecordsRetrievalStrategy, - executorService, - 1000, - new NullMetricsFactory(), - operation, - "shardId", - AWAIT_TERMINATION_TIMEOUT); + getRecordsCache = createPrefetchRecordsPublisher(Duration.ofSeconds(1).toMillis()); // Setup the retrieval strategy to fail initial calls before succeeding - when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenThrow(new - RetryableRetrievalException("Timed out")).thenThrow(new - RetryableRetrievalException("Timed out again")).thenReturn(getRecordsResponse); + when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))) + .thenThrow(new RetryableRetrievalException("Timed out")) + .thenThrow(new RetryableRetrievalException("Timed out again")) + .thenReturn(getRecordsResponse); record = Record.builder().data(createByteBufferWithSize(SIZE_512_KB)).build(); when(records.size()).thenReturn(1000); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + final List expectedRecords = + records.stream().map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); getRecordsCache.start(sequenceNumber, initialPosition); ProcessRecordsInput result = null; // Setup timeout to be more than what the PrefetchRecordsPublisher will need based on the idle time between // get calls and then validate the mocks later - result = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, - "shardId"), 4000L) + result = BlockingUtils.blockUntilRecordsAvailable(this::evictPublishedEvent, 4000L) .processRecordsInput(); assertEquals(expectedRecords, result.records()); @@ -296,15 +283,15 @@ public class PrefetchRecordsPublisherTest { when(records.size()).thenReturn(1000); - GetRecordsResponse response = GetRecordsResponse.builder().records(records).build(); + GetRecordsResponse response = + GetRecordsResponse.builder().records(records).build(); when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(response); when(dataFetcher.isShardEndReached()).thenReturn(false); getRecordsCache.start(sequenceNumber, initialPosition); try { - ProcessRecordsInput result = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) - .processRecordsInput(); + blockUntilRecordsAvailable(); } catch (Exception e) { assertEquals("No records found", e.getMessage()); } @@ -332,13 +319,15 @@ public class PrefetchRecordsPublisherTest { childShards.add(leftChild); childShards.add(rightChild); - GetRecordsResponse response = GetRecordsResponse.builder().records(records).childShards(childShards).build(); + GetRecordsResponse response = GetRecordsResponse.builder() + .records(records) + .childShards(childShards) + .build(); when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(response); when(dataFetcher.isShardEndReached()).thenReturn(true); getRecordsCache.start(sequenceNumber, initialPosition); - ProcessRecordsInput result = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) - .processRecordsInput(); + ProcessRecordsInput result = blockUntilRecordsAvailable().processRecordsInput(); assertEquals(expectedRecords, result.records()); assertEquals(childShards, result.childShards()); @@ -373,11 +362,11 @@ public class PrefetchRecordsPublisherTest { sleep(2000); - int callRate = (int) Math.ceil((double) MAX_RECORDS_COUNT/recordsSize); + int callRate = 
(int) Math.ceil((double) MAX_RECORDS_COUNT / recordsSize); // TODO: fix this verification // verify(getRecordsRetrievalStrategy, times(callRate)).getRecords(MAX_RECORDS_PER_CALL); // assertEquals(spyQueue.size(), callRate); - assertTrue("Call Rate is "+callRate,callRate < MAX_SIZE); + assertTrue("Call Rate is " + callRate, callRate < MAX_SIZE); } @Test @@ -402,11 +391,11 @@ public class PrefetchRecordsPublisherTest { record = Record.builder().data(createByteBufferWithSize(1024)).build(); IntStream.range(0, recordsSize).forEach(i -> records.add(record)); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + final List expectedRecords = + records.stream().map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); getRecordsCache.start(sequenceNumber, initialPosition); - ProcessRecordsInput processRecordsInput = evictPublishedEvent(getRecordsCache, "shardId").processRecordsInput(); + ProcessRecordsInput processRecordsInput = evictPublishedEvent().processRecordsInput(); verify(executorService).execute(any()); assertEquals(expectedRecords, processRecordsInput.records()); @@ -415,7 +404,7 @@ public class PrefetchRecordsPublisherTest { sleep(2000); - ProcessRecordsInput processRecordsInput2 = evictPublishedEvent(getRecordsCache, "shardId").processRecordsInput(); + ProcessRecordsInput processRecordsInput2 = evictPublishedEvent().processRecordsInput(); assertNotEquals(processRecordsInput, processRecordsInput2); assertEquals(expectedRecords, processRecordsInput2.records()); assertNotEquals(processRecordsInput2.timeSpentInCache(), Duration.ZERO); @@ -424,28 +413,46 @@ public class PrefetchRecordsPublisherTest { } @Test(expected = IllegalStateException.class) - public void testGetNextRecordsWithoutStarting() { - verify(executorService, times(0)).execute(any()); - getRecordsCache.drainQueueForRequests(); + public void testSubscribeWithoutStarting() { + verify(executorService, never()).execute(any()); + Subscriber mockSubscriber = mock(Subscriber.class); + getRecordsCache.subscribe(mockSubscriber); } @Test(expected = IllegalStateException.class) - public void testCallAfterShutdown() { + public void testRequestRecordsOnSubscriptionAfterShutdown() { + GetRecordsResponse response = GetRecordsResponse.builder() + .records(Record.builder() + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .sequenceNumber("123") + .build()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build(); + when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenReturn(response); + + getRecordsCache.start(sequenceNumber, initialPosition); + + verify(getRecordsRetrievalStrategy, timeout(100).atLeastOnce()).getRecords(anyInt()); + when(executorService.isShutdown()).thenReturn(true); - getRecordsCache.drainQueueForRequests(); + Subscriber mockSubscriber = mock(Subscriber.class); + getRecordsCache.subscribe(mockSubscriber); + ArgumentCaptor subscriptionCaptor = ArgumentCaptor.forClass(Subscription.class); + verify(mockSubscriber).onSubscribe(subscriptionCaptor.capture()); + subscriptionCaptor.getValue().request(1); } @Test public void testExpiredIteratorException() { - log.info("Starting tests"); - when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)).thenThrow(ExpiredIteratorException.class) + when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)) + .thenThrow(ExpiredIteratorException.class) .thenReturn(getRecordsResponse); getRecordsCache.start(sequenceNumber, initialPosition); doNothing().when(dataFetcher).restartIterator(); - 
blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L); + blockUntilRecordsAvailable(); sleep(1000); @@ -456,7 +463,6 @@ public class PrefetchRecordsPublisherTest { public void testExpiredIteratorExceptionWithIllegalStateException() { // This test validates that the daemon thread doesn't die when ExpiredIteratorException occurs with an // IllegalStateException. - when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)) .thenThrow(ExpiredIteratorException.builder().build()) .thenReturn(getRecordsResponse) @@ -466,7 +472,13 @@ public class PrefetchRecordsPublisherTest { doThrow(new IllegalStateException()).when(dataFetcher).restartIterator(); getRecordsCache.start(sequenceNumber, initialPosition); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); // verify restartIterator was called verify(dataFetcher, times(2)).restartIterator(); @@ -474,14 +486,37 @@ public class PrefetchRecordsPublisherTest { @Test public void testRetryableRetrievalExceptionContinues() { - - GetRecordsResponse response = GetRecordsResponse.builder().millisBehindLatest(100L).records(Collections.emptyList()).nextShardIterator(NEXT_SHARD_ITERATOR).build(); - when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenThrow(new RetryableRetrievalException("Timeout", new TimeoutException("Timeout"))).thenReturn(response); + GetRecordsResponse response = GetRecordsResponse.builder() + .millisBehindLatest(100L) + .records(Collections.emptyList()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build(); + when(getRecordsRetrievalStrategy.getRecords(anyInt())) + .thenThrow(new RetryableRetrievalException("Timeout", new TimeoutException("Timeout"))) + .thenReturn(response); getRecordsCache.start(sequenceNumber, initialPosition); - RecordsRetrieved records = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000); - assertThat(records.processRecordsInput().millisBehindLatest(), equalTo(response.millisBehindLatest())); + RecordsRetrieved records = blockUntilRecordsAvailable(); + assertEquals(records.processRecordsInput().millisBehindLatest(), response.millisBehindLatest()); + } + + @Test + public void testInvalidArgumentExceptionIsRetried() { + when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)) + .thenThrow(InvalidArgumentException.builder().build()) + .thenReturn(getRecordsResponse); + + getRecordsCache.start(sequenceNumber, initialPosition); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); + + verify(dataFetcher, times(1)).restartIterator(); } @Test(timeout = 10000L) @@ -493,12 +528,15 @@ public class PrefetchRecordsPublisherTest { // If the test times out before starting the subscriber it means something went wrong while filling the queue. // After the subscriber is started one of the things that can trigger a timeout is a deadlock. 
// + final int[] sequenceNumberInResponse = {0}; - final int[] sequenceNumberInResponse = { 0 }; - - when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenAnswer( i -> GetRecordsResponse.builder().records( - Record.builder().data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).sequenceNumber(++sequenceNumberInResponse[0] + "").build()) - .nextShardIterator(NEXT_SHARD_ITERATOR).build()); + when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenAnswer(i -> GetRecordsResponse.builder() + .records(Record.builder() + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .sequenceNumber(++sequenceNumberInResponse[0] + "") + .build()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build()); getRecordsCache.start(sequenceNumber, initialPosition); @@ -510,15 +548,17 @@ public class PrefetchRecordsPublisherTest { Thread.yield(); } - log.info("Queue is currently at {} starting subscriber", getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); + log.info( + "Queue is currently at {} starting subscriber", + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); AtomicInteger receivedItems = new AtomicInteger(0); final int expectedItems = MAX_SIZE * 10; Object lock = new Object(); - final boolean[] isRecordNotInorder = { false }; - final String[] recordNotInOrderMessage = { "" }; + final boolean[] isRecordNotInorder = {false}; + final String[] recordNotInOrderMessage = {""}; Subscriber delegateSubscriber = new Subscriber() { Subscription sub; @@ -534,11 +574,12 @@ public class PrefetchRecordsPublisherTest { public void onNext(RecordsRetrieved recordsRetrieved) { receivedItems.incrementAndGet(); if (Integer.parseInt(((PrefetchRecordsPublisher.PrefetchRecordsRetrieved) recordsRetrieved) - .lastBatchSequenceNumber()) != ++receivedSeqNum) { + .lastBatchSequenceNumber()) + != ++receivedSeqNum) { isRecordNotInorder[0] = true; recordNotInOrderMessage[0] = "Expected : " + receivedSeqNum + " Actual : " + ((PrefetchRecordsPublisher.PrefetchRecordsRetrieved) recordsRetrieved) - .lastBatchSequenceNumber(); + .lastBatchSequenceNumber(); } if (receivedItems.get() >= expectedItems) { synchronized (lock) { @@ -563,12 +604,15 @@ public class PrefetchRecordsPublisherTest { } }; - Subscriber subscriber = new ShardConsumerNotifyingSubscriber(delegateSubscriber, getRecordsCache); + Subscriber subscriber = + new ShardConsumerNotifyingSubscriber(delegateSubscriber, getRecordsCache); synchronized (lock) { log.info("Awaiting notification"); - Flowable.fromPublisher(getRecordsCache).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(subscriber); + Flowable.fromPublisher(getRecordsCache) + .subscribeOn(Schedulers.computation()) + .observeOn(Schedulers.computation(), true, 8) + .subscribe(subscriber); try { lock.wait(); } catch (InterruptedException e) { @@ -587,9 +631,13 @@ public class PrefetchRecordsPublisherTest { // // This test is to verify that the data consumption is not stuck in the case of an failed event delivery // to the subscriber. 
- GetRecordsResponse response = GetRecordsResponse.builder().records( - Record.builder().data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).sequenceNumber("123").build()) - .nextShardIterator(NEXT_SHARD_ITERATOR).build(); + GetRecordsResponse response = GetRecordsResponse.builder() + .records(Record.builder() + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .sequenceNumber("123") + .build()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build(); when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenReturn(response); getRecordsCache.start(sequenceNumber, initialPosition); @@ -602,7 +650,9 @@ public class PrefetchRecordsPublisherTest { Thread.yield(); } - log.info("Queue is currently at {} starting subscriber", getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); + log.info( + "Queue is currently at {} starting subscriber", + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); AtomicInteger receivedItems = new AtomicInteger(0); final int expectedItems = MAX_SIZE * 20; @@ -648,8 +698,10 @@ public class PrefetchRecordsPublisherTest { synchronized (lock) { log.info("Awaiting notification"); - Flowable.fromPublisher(getRecordsCache).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(subscriber); + Flowable.fromPublisher(getRecordsCache) + .subscribeOn(Schedulers.computation()) + .observeOn(Schedulers.computation(), true, 8) + .subscribe(subscriber); try { lock.wait(); } catch (InterruptedException e) { @@ -662,51 +714,98 @@ public class PrefetchRecordsPublisherTest { @Test public void testResetClearsRemainingData() { - List responses = Stream.iterate(0, i -> i + 1).limit(10).map(i -> { - Record record = Record.builder().partitionKey("record-" + i).sequenceNumber("seq-" + i) - .data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).approximateArrivalTimestamp(Instant.now()) - .build(); - String nextIterator = "shard-iter-" + (i + 1); - return GetRecordsResponse.builder().records(record).nextShardIterator(nextIterator).build(); - }).collect(Collectors.toList()); + List responses = Stream.iterate(0, i -> i + 1) + .limit(10) + .map(i -> { + Record record = Record.builder() + .partitionKey("record-" + i) + .sequenceNumber("seq-" + i) + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .approximateArrivalTimestamp(Instant.now()) + .build(); + String nextIterator = "shard-iter-" + (i + 1); + return GetRecordsResponse.builder() + .records(record) + .nextShardIterator(nextIterator) + .build(); + }) + .collect(Collectors.toList()); RetrieverAnswer retrieverAnswer = new RetrieverAnswer(responses); when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenAnswer(retrieverAnswer); doAnswer(a -> { - String resetTo = a.getArgumentAt(0, String.class); - retrieverAnswer.resetIteratorTo(resetTo); - return null; - }).when(dataFetcher).resetIterator(anyString(), anyString(), any()); + String resetTo = a.getArgumentAt(0, String.class); + retrieverAnswer.resetIteratorTo(resetTo); + return null; + }) + .when(dataFetcher) + .resetIterator(anyString(), anyString(), any()); getRecordsCache.start(sequenceNumber, initialPosition); - RecordsRetrieved lastProcessed = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000); - RecordsRetrieved expected = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000); + RecordsRetrieved lastProcessed = blockUntilRecordsAvailable(); + RecordsRetrieved expected = blockUntilRecordsAvailable(); // // Skip some of the records the 
cache // - blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000); - blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000); + blockUntilRecordsAvailable(); + blockUntilRecordsAvailable(); verify(getRecordsRetrievalStrategy, atLeast(2)).getRecords(anyInt()); - while(getRecordsCache.getPublisherSession().prefetchRecordsQueue().remainingCapacity() > 0) { + while (getRecordsCache.getPublisherSession().prefetchRecordsQueue().remainingCapacity() > 0) { Thread.yield(); } getRecordsCache.restartFrom(lastProcessed); - RecordsRetrieved postRestart = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000); + RecordsRetrieved postRestart = blockUntilRecordsAvailable(); assertThat(postRestart.processRecordsInput(), eqProcessRecordsInput(expected.processRecordsInput())); - verify(dataFetcher).resetIterator(eq(responses.get(0).nextShardIterator()), - eq(responses.get(0).records().get(0).sequenceNumber()), any()); - + verify(dataFetcher) + .resetIterator( + eq(responses.get(0).nextShardIterator()), + eq(responses.get(0).records().get(0).sequenceNumber()), + any()); } - private RecordsRetrieved evictPublishedEvent(PrefetchRecordsPublisher publisher, String shardId) { - return publisher.getPublisherSession().evictPublishedRecordAndUpdateDemand(shardId); + /** + * Tests that a thrown {@link SdkException} doesn't cause a retry storm. + */ + @Test(expected = RuntimeException.class) + public void testRepeatSdkExceptionLoop() { + final int expectedFailedCalls = 4; + getRecordsCache = createPrefetchRecordsPublisher(DEFAULT_TIMEOUT_MILLIS / expectedFailedCalls); + getRecordsCache.start(sequenceNumber, initialPosition); + + try { + // return a valid response to cause `lastSuccessfulCall` to initialize + when(getRecordsRetrievalStrategy.getRecords(anyInt())) + .thenReturn(GetRecordsResponse.builder().build()); + blockUntilRecordsAvailable(); + } catch (RuntimeException re) { + Assert.fail("first call should succeed"); + } + + try { + when(getRecordsRetrievalStrategy.getRecords(anyInt())) + .thenThrow(SdkException.builder() + .message("lose yourself to dance") + .build()); + blockUntilRecordsAvailable(); + } finally { + // the successful call is the +1 + verify(getRecordsRetrievalStrategy, times(expectedFailedCalls + 1)).getRecords(anyInt()); + } + } + + private RecordsRetrieved blockUntilRecordsAvailable() { + return BlockingUtils.blockUntilRecordsAvailable(this::evictPublishedEvent, DEFAULT_TIMEOUT_MILLIS); + } + + private RecordsRetrieved evictPublishedEvent() { + return getRecordsCache.getPublisherSession().evictPublishedRecordAndUpdateDemand("shardId"); } private static class RetrieverAnswer implements Answer { @@ -721,7 +820,7 @@ public class PrefetchRecordsPublisherTest { public void resetIteratorTo(String nextIterator) { Iterator newIterator = responses.iterator(); - while(newIterator.hasNext()) { + while (newIterator.hasNext()) { GetRecordsResponse current = newIterator.next(); if (StringUtils.equals(nextIterator, current.nextShardIterator())) { if (!newIterator.hasNext()) { @@ -736,7 +835,7 @@ public class PrefetchRecordsPublisherTest { } @Override - public GetRecordsResponse answer(InvocationOnMock invocation) throws Throwable { + public GetRecordsResponse answer(InvocationOnMock invocation) { GetRecordsResponse response = iterator.next(); if (!iterator.hasNext()) { iterator = responses.iterator(); @@ -749,7 +848,7 @@ public class PrefetchRecordsPublisherTest { private static final int LOSS_EVERY_NTH_RECORD = 
50; private static int recordCounter = 0; - private static final ScheduledExecutorService consumerHealthChecker = Executors.newScheduledThreadPool(1); + private static final ScheduledExecutorService CONSUMER_HEALTH_CHECKER = Executors.newScheduledThreadPool(1); public LossyNotificationSubscriber(Subscriber delegate, RecordsPublisher recordsPublisher) { super(delegate, recordsPublisher); @@ -762,15 +861,21 @@ public class PrefetchRecordsPublisherTest { getDelegateSubscriber().onNext(recordsRetrieved); } else { log.info("Record Loss Triggered"); - consumerHealthChecker.schedule(() -> { - getRecordsPublisher().restartFrom(recordsRetrieved); - Flowable.fromPublisher(getRecordsPublisher()).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(this); - }, 1000, TimeUnit.MILLISECONDS); + CONSUMER_HEALTH_CHECKER.schedule( + () -> { + getRecordsPublisher().restartFrom(recordsRetrieved); + Flowable.fromPublisher(getRecordsPublisher()) + .subscribeOn(Schedulers.computation()) + .observeOn(Schedulers.computation(), true, 8) + .subscribe(this); + }, + 1000, + TimeUnit.MILLISECONDS); } recordCounter++; } } + @After public void shutdown() { getRecordsCache.shutdown(); @@ -780,11 +885,26 @@ public class PrefetchRecordsPublisherTest { private void sleep(long millis) { try { Thread.sleep(millis); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } } private SdkBytes createByteBufferWithSize(int size) { return SdkBytes.fromByteArray(new byte[size]); } + private PrefetchRecordsPublisher createPrefetchRecordsPublisher(final long idleMillisBetweenCalls) { + return new PrefetchRecordsPublisher( + MAX_SIZE, + 3 * SIZE_1_MB, + MAX_RECORDS_COUNT, + MAX_RECORDS_PER_CALL, + getRecordsRetrievalStrategy, + executorService, + idleMillisBetweenCalls, + new NullMetricsFactory(), + PrefetchRecordsPublisherTest.class.getSimpleName(), + "shardId", + 1L); + } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java index ddc25e21..ad4a6046 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java @@ -14,16 +14,11 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.when; - import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.retrieval.DataFetchingStrategy; @@ -31,14 +26,20 @@ import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; import software.amazon.kinesis.retrieval.RecordsFetcherFactory; import software.amazon.kinesis.retrieval.RecordsPublisher; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.when; + public class RecordsFetcherFactoryTest { private String shardId = "TestShard"; private RecordsFetcherFactory recordsFetcherFactory; @Mock private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; + @Mock private MetricsFactory 
metricsFactory; + @Mock private DataFetcher dataFetcher; @@ -52,18 +53,18 @@ public class RecordsFetcherFactoryTest { @Test @Ignore -// TODO: remove test no longer holds true + // TODO: remove test no longer holds true public void createDefaultRecordsFetcherTest() { - RecordsPublisher recordsCache = recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, - metricsFactory, 1); + RecordsPublisher recordsCache = + recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, metricsFactory, 1); assertThat(recordsCache, instanceOf(BlockingRecordsPublisher.class)); } @Test public void createPrefetchRecordsFetcherTest() { recordsFetcherFactory.dataFetchingStrategy(DataFetchingStrategy.PREFETCH_CACHED); - RecordsPublisher recordsCache = recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, - metricsFactory, 1); + RecordsPublisher recordsCache = + recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, metricsFactory, 1); assertThat(recordsCache, instanceOf(PrefetchRecordsPublisher.class)); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java new file mode 100644 index 00000000..f6be4692 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java @@ -0,0 +1,76 @@ +package software.amazon.kinesis.utils; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.config.KCLAppConfig; + +@Slf4j +@NoArgsConstructor +public abstract class AWSResourceManager { + + /** + * Make the delete-resource API call for a specific resource type + */ + public abstract void deleteResourceCall(String resourceName) throws Exception; + + /** + * Check if the resource with the given name is in an active state + */ + public abstract boolean isResourceActive(String name); + + /** + * Get a list of the names of all resources of a specified type + * + * @throws Exception + */ + public abstract List<String> getAllResourceNames() throws Exception; + + /** + * Delete the resource with the specified resource name + */ + public void deleteResource(String resourceName) throws Exception { + + try { + deleteResourceCall(resourceName); + } catch (Exception e) { + throw new Exception("Could not delete resource " + resourceName, e); + } + + // Wait until the resource is deleted before returning + int i = 0; + while (true) { + i++; + if (i > 100) { + throw new RuntimeException("Failed resource deletion"); + } + try { + if (!isResourceActive(resourceName)) { + log.info("Successfully deleted the resource {}", resourceName); + return; + } + } catch (Exception e) { + try { + Thread.sleep(TimeUnit.SECONDS.toMillis(10)); + } catch (InterruptedException e1) { + } + log.info("Resource {} is not deleted yet, exception: ", resourceName, e); + } + } + } + + /** + * Delete all instances of a particular resource type + */ + public void deleteAllResource() throws Exception { + final List<String> resourceNames = getAllResourceNames(); + for (String resourceName : resourceNames) { + // Delete all resources that have prefix "KCLRelease" + if (resourceName.startsWith(KCLAppConfig.INTEGRATION_TEST_RESOURCE_PREFIX)) { + deleteResource(resourceName); + } + } + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java index 0d68e51b..a8c3b268 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java @@ -21,7 +21,7 @@ public class BlockingUtils { public static <Records> Records blockUntilRecordsAvailable(Supplier<Records> recordsSupplier, long timeoutMillis) { Records recordsRetrieved; - while((recordsRetrieved = recordsSupplier.get()) == null && timeoutMillis > 0 ) { + while ((recordsRetrieved = recordsSupplier.get()) == null && timeoutMillis > 0) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -29,7 +29,7 @@ public class BlockingUtils { } timeoutMillis -= 100; } - if(recordsRetrieved != null) { + if (recordsRetrieved != null) { return recordsRetrieved; } else { throw new RuntimeException("No records found"); @@ -37,7 +37,7 @@ public class BlockingUtils { } public static boolean blockUntilConditionSatisfied(Supplier<Boolean> conditionSupplier, long timeoutMillis) { - while(!conditionSupplier.get() && timeoutMillis > 0 ) { + while (!conditionSupplier.get() && timeoutMillis > 0) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -47,5 +47,4 @@ public class BlockingUtils { } return conditionSupplier.get(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java new file mode 100644 index 00000000..4f0672d6 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java @@ -0,0 +1,73 @@ +package software.amazon.kinesis.utils; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest; +import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; +import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; +import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest; +import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse; +import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; +import software.amazon.awssdk.services.dynamodb.model.TableStatus; +import software.amazon.kinesis.common.FutureUtils; + +@Slf4j +@AllArgsConstructor +public class LeaseTableManager extends AWSResourceManager { + + private final DynamoDbAsyncClient dynamoClient; + + public boolean isResourceActive(String tableName) { + final DescribeTableRequest request = + DescribeTableRequest.builder().tableName(tableName).build(); + final CompletableFuture<DescribeTableResponse> describeTableResponseCompletableFuture = + dynamoClient.describeTable(request); + + try { + final DescribeTableResponse response = describeTableResponseCompletableFuture.get(30, TimeUnit.SECONDS); + boolean isActive = response.table().tableStatus().equals(TableStatus.ACTIVE); + if (!isActive) { + throw new RuntimeException("Table is not active, instead in status: " + + response.table().tableStatus()); + } + return true; + } catch (ExecutionException e) { + if (e.getCause() instanceof ResourceNotFoundException) { + return false; + } else {
+ throw new RuntimeException(e); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void deleteResourceCall(String tableName) throws Exception { + final DeleteTableRequest request = + DeleteTableRequest.builder().tableName(tableName).build(); + FutureUtils.resolveOrCancelFuture(dynamoClient.deleteTable(request), Duration.ofSeconds(60)); + } + + public List getAllResourceNames() throws Exception { + ListTablesRequest listTableRequest = ListTablesRequest.builder().build(); + List allTableNames = new ArrayList<>(); + ListTablesResponse result = null; + do { + result = FutureUtils.resolveOrCancelFuture( + dynamoClient.listTables(listTableRequest), Duration.ofSeconds(60)); + allTableNames.addAll(result.tableNames()); + listTableRequest = ListTablesRequest.builder() + .exclusiveStartTableName(result.lastEvaluatedTableName()) + .build(); + } while (result.lastEvaluatedTableName() != null); + return allTableNames; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java index 1aeddc60..5c2e2f3f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java @@ -15,16 +15,16 @@ package software.amazon.kinesis.utils; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + import lombok.Data; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.nullValue; @@ -38,11 +38,11 @@ public class ProcessRecordsInputMatcher extends TypeSafeDiagnosingMatcher e.getValue().matcher.matches(e.getValue().accessor.apply(item))).anyMatch(e -> { + .filter(e -> e.getValue().matcher.matches(e.getValue().accessor.apply(item))) + .anyMatch(e -> { mismatchDescription.appendText(e.getKey()).appendText(" "); e.getValue().matcher.describeMismatch(e.getValue().accessor.apply(item), mismatchDescription); return true; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidationStatus.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidationStatus.java new file mode 100644 index 00000000..998d5c44 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidationStatus.java @@ -0,0 +1,10 @@ +package software.amazon.kinesis.utils; + +/** + * Possible outcomes for record validation in RecordValidatorQueue + */ +public enum RecordValidationStatus { + OUT_OF_ORDER, + MISSING_RECORD, + NO_ERROR +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java new file mode 100644 index 00000000..50e993c9 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java @@ -0,0 +1,66 @@ +package software.amazon.kinesis.utils; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import 
java.util.concurrent.ConcurrentHashMap; + +import lombok.extern.slf4j.Slf4j; + +/** + * Class that maintains a dictionary that maps shard IDs to a list of records + * that are processed by that shard. + * Validation ensures that + * 1. The records processed by each shard are in increasing order (duplicates allowed) + * 2. The total number of unique records processed is equal to the number of records put on the stream + */ +@Slf4j +public class RecordValidatorQueue { + + private final ConcurrentHashMap<String, List<String>> dict = new ConcurrentHashMap<>(); + + public void add(String shardId, String data) { + final List<String> values = dict.computeIfAbsent(shardId, key -> new ArrayList<>()); + values.add(data); + } + + public RecordValidationStatus validateRecords(int expectedRecordCount) { + + // Validate that each List in the HashMap has data records in increasing order + for (Map.Entry<String, List<String>> entry : dict.entrySet()) { + List<String> recordsPerShard = entry.getValue(); + int prevVal = -1; + for (String record : recordsPerShard) { + int nextVal = Integer.parseInt(record); + if (prevVal > nextVal) { + log.error( + "The records are not in increasing order. Saw record data {} before {}.", prevVal, nextVal); + return RecordValidationStatus.OUT_OF_ORDER; + } + prevVal = nextVal; + } + } + + // Validate that no records are missing over all shards + int actualRecordCount = 0; + for (Map.Entry<String, List<String>> entry : dict.entrySet()) { + List<String> recordsPerShard = entry.getValue(); + Set<String> noDupRecords = new HashSet<>(recordsPerShard); + actualRecordCount += noDupRecords.size(); + } + + // If this is true, then there was some record that was missed during processing. + if (actualRecordCount != expectedRecordCount) { + log.error( + "Failed to get correct number of records processed. Should be {} but was {}", + expectedRecordCount, + actualRecordCount); + return RecordValidationStatus.MISSING_RECORD; + } + + // Record validation succeeded.
+ return RecordValidationStatus.NO_ERROR; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueueTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueueTest.java new file mode 100644 index 00000000..c196aa54 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueueTest.java @@ -0,0 +1,44 @@ +package software.amazon.kinesis.utils; + +import org.junit.Assert; +import org.junit.Test; + +public class RecordValidatorQueueTest { + + private final RecordValidatorQueue recordValidator = new RecordValidatorQueue(); + + private static final String SHARD_ID = "ABC"; + + @Test + public void testValidationFailedRecordOutOfOrder() { + recordValidator.add(SHARD_ID, "0"); + recordValidator.add(SHARD_ID, "1"); + recordValidator.add(SHARD_ID, "3"); + recordValidator.add(SHARD_ID, "2"); + + RecordValidationStatus error = recordValidator.validateRecords(4); + Assert.assertEquals(RecordValidationStatus.OUT_OF_ORDER, error); + } + + @Test + public void testValidationFailedMissingRecord() { + recordValidator.add(SHARD_ID, "0"); + recordValidator.add(SHARD_ID, "1"); + recordValidator.add(SHARD_ID, "2"); + recordValidator.add(SHARD_ID, "3"); + + RecordValidationStatus error = recordValidator.validateRecords(5); + Assert.assertEquals(RecordValidationStatus.MISSING_RECORD, error); + } + + @Test + public void testValidRecords() { + recordValidator.add(SHARD_ID, "0"); + recordValidator.add(SHARD_ID, "1"); + recordValidator.add(SHARD_ID, "2"); + recordValidator.add(SHARD_ID, "3"); + + RecordValidationStatus error = recordValidator.validateRecords(4); + Assert.assertEquals(RecordValidationStatus.NO_ERROR, error); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ReshardOptions.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ReshardOptions.java new file mode 100644 index 00000000..f1513cfb --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ReshardOptions.java @@ -0,0 +1,21 @@ +package software.amazon.kinesis.utils; + +/** + * Specifies the types of resharding possible in integration tests + * Split doubles the number of shards. + * Merge halves the number of shards. 
+ */ +public enum ReshardOptions { + SPLIT { + public int calculateShardCount(int currentShards) { + return (int) (2.0 * currentShards); + } + }, + MERGE { + public int calculateShardCount(int currentShards) { + return (int) (0.5 * currentShards); + } + }; + + public abstract int calculateShardCount(int currentShards); +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java new file mode 100644 index 00000000..df0bcab3 --- /dev/null +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java @@ -0,0 +1,236 @@ +package software.amazon.kinesis.utils; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import lombok.Value; +import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; +import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DeleteStreamRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; +import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse; +import software.amazon.awssdk.services.kinesis.model.ListStreamsRequest; +import software.amazon.awssdk.services.kinesis.model.ListStreamsResponse; +import software.amazon.awssdk.services.kinesis.model.PutResourcePolicyRequest; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; +import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse; +import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; +import software.amazon.awssdk.services.kinesis.model.StreamStatus; +import software.amazon.kinesis.common.FutureUtils; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.RetrievalMode; + +@Value +@Slf4j +public class StreamExistenceManager extends AWSResourceManager { + private static final int CHECK_RESOURCE_ACTIVE_MAX_RETRIES = 3; + + private final KinesisAsyncClient client; + private final KCLAppConfig testConfig; + + public StreamExistenceManager(KCLAppConfig config) throws URISyntaxException, IOException { + this.testConfig = config; + this.client = config.buildAsyncKinesisClientForStreamOwner(); + } + + public boolean isResourceActive(String streamName) { + final DescribeStreamSummaryRequest request = + DescribeStreamSummaryRequest.builder().streamName(streamName).build(); + try { + final DescribeStreamSummaryResponse response = + FutureUtils.resolveOrCancelFuture(client.describeStreamSummary(request), Duration.ofSeconds(60)); + final boolean isActive = + response.streamDescriptionSummary().streamStatus().equals(StreamStatus.ACTIVE); + return isActive; + } catch (ExecutionException e) { + if (e.getCause() instanceof ResourceNotFoundException) { + return false; + } else { + throw new RuntimeException(e); + } + } catch (Exception e) { + throw new 
RuntimeException(e); + } + } + + private boolean isConsumerActive(Arn consumerArn) { + final DescribeStreamConsumerRequest request = DescribeStreamConsumerRequest.builder() + .consumerARN(consumerArn.toString()) + .build(); + try { + final DescribeStreamConsumerResponse response = + FutureUtils.resolveOrCancelFuture(client.describeStreamConsumer(request), Duration.ofSeconds(60)); + final boolean isActive = + response.consumerDescription().consumerStatus().equals(ConsumerStatus.ACTIVE); + return isActive; + } catch (ExecutionException e) { + if (e.getCause() instanceof ResourceNotFoundException) { + return false; + } else { + throw new RuntimeException(e); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void deleteResourceCall(String streamName) throws Exception { + final DeleteStreamRequest request = DeleteStreamRequest.builder() + .streamName(streamName) + .enforceConsumerDeletion(true) + .build(); + client.deleteStream(request).get(30, TimeUnit.SECONDS); + } + + public List getAllResourceNames() throws Exception { + ListStreamsRequest listStreamRequest = ListStreamsRequest.builder().build(); + List allStreamNames = new ArrayList<>(); + ListStreamsResponse result = null; + do { + result = FutureUtils.resolveOrCancelFuture(client.listStreams(listStreamRequest), Duration.ofSeconds(60)); + allStreamNames.addAll(result.streamNames()); + listStreamRequest = ListStreamsRequest.builder() + .exclusiveStartStreamName(result.nextToken()) + .build(); + } while (result.hasMoreStreams()); + return allStreamNames; + } + + public void checkStreamsAndCreateIfNecessary() { + for (String streamName : testConfig.getStreamNames()) { + if (!isResourceActive(streamName)) { + createStream(streamName, testConfig.getShardCount()); + } + log.info("Using stream {} with region {}", streamName, testConfig.getRegion()); + } + + if (testConfig.isCrossAccount()) { + for (Arn streamArn : testConfig.getStreamArns()) { + log.info("Putting cross account stream resource policy for stream {}", streamArn); + putResourcePolicyForCrossAccount( + streamArn, + getCrossAccountStreamResourcePolicy(testConfig.getAccountIdForConsumer(), streamArn)); + } + } + } + + public Map createCrossAccountConsumerIfNecessary() throws Exception { + // For cross account, KCL cannot create the consumer automatically in another account, so + // we have to create it ourselves and provide the arn to the StreamConfig in multi-stream mode or + // RetrievalConfig in single-stream mode + if (testConfig.isCrossAccount() && testConfig.getRetrievalMode().equals(RetrievalMode.STREAMING)) { + final Map streamToConsumerArnsMap = new HashMap<>(); + for (Arn streamArn : testConfig.getStreamArns()) { + final Arn consumerArn = + registerConsumerAndWaitForActive(streamArn, KCLAppConfig.CROSS_ACCOUNT_CONSUMER_NAME); + putResourcePolicyForCrossAccount( + consumerArn, + getCrossAccountConsumerResourcePolicy(testConfig.getAccountIdForConsumer(), consumerArn)); + streamToConsumerArnsMap.put(streamArn, consumerArn); + } + return streamToConsumerArnsMap; + } + return null; + } + + private void putResourcePolicyForCrossAccount(Arn resourceArn, String policy) { + try { + final PutResourcePolicyRequest putResourcePolicyRequest = PutResourcePolicyRequest.builder() + .resourceARN(resourceArn.toString()) + .policy(policy) + .build(); + FutureUtils.resolveOrCancelFuture( + client.putResourcePolicy(putResourcePolicyRequest), Duration.ofSeconds(60)); + } catch (Exception e) { + throw new RuntimeException("Failed to PutResourcePolicy " + policy + " 
on resource " + resourceArn, e); + } + } + + private String getCrossAccountStreamResourcePolicy(String accountId, Arn streamArn) { + return "{\"Version\":\"2012-10-17\"," + + "\"Statement\":[{" + + "\"Effect\": \"Allow\"," + + "\"Principal\": {\"AWS\": \"" + accountId + "\"}," + + "\"Action\": [" + + "\"kinesis:DescribeStreamSummary\",\"kinesis:ListShards\",\"kinesis:PutRecord\",\"kinesis:PutRecords\"," + + "\"kinesis:GetRecords\",\"kinesis:GetShardIterator\"]," + + "\"Resource\": \"" + streamArn.toString() + "\"" + + "}]}"; + } + + private String getCrossAccountConsumerResourcePolicy(String accountId, Arn consumerArn) { + return "{\"Version\":\"2012-10-17\"," + + "\"Statement\":[{" + + "\"Effect\": \"Allow\"," + + "\"Principal\": {\"AWS\": \"" + accountId + "\"}," + + "\"Action\": [" + + "\"kinesis:DescribeStreamConsumer\",\"kinesis:SubscribeToShard\"]," + + "\"Resource\": \"" + consumerArn.toString() + "\"" + + "}]}"; + } + + private Arn registerConsumerAndWaitForActive(Arn streamArn, String consumerName) throws Exception { + final RegisterStreamConsumerRequest registerStreamConsumerRequest = RegisterStreamConsumerRequest.builder() + .streamARN(streamArn.toString()) + .consumerName(consumerName) + .build(); + final RegisterStreamConsumerResponse response = FutureUtils.resolveOrCancelFuture( + client.registerStreamConsumer(registerStreamConsumerRequest), Duration.ofSeconds(60)); + final Arn consumerArn = Arn.fromString(response.consumer().consumerARN()); + + int retries = 0; + while (!isConsumerActive(consumerArn)) { + log.info("Consumer {} is not active yet. Checking again in 5 seconds.", consumerArn); + if (retries > CHECK_RESOURCE_ACTIVE_MAX_RETRIES) { + throw new RuntimeException("Failed consumer registration, did not transition into active"); + } + try { + Thread.sleep(TimeUnit.SECONDS.toMillis(5)); + } catch (InterruptedException e) { + log.error("Failed to sleep"); + } + retries++; + } + log.info("Successfully registered consumer {}", consumerArn); + return consumerArn; + } + + private void createStream(String streamName, int shardCount) { + final CreateStreamRequest request = CreateStreamRequest.builder() + .streamName(streamName) + .shardCount(shardCount) + .build(); + try { + client.createStream(request).get(30, TimeUnit.SECONDS); + } catch (Exception e) { + throw new RuntimeException("Failed to create stream with name " + streamName, e); + } + + int retries = 0; + while (!isResourceActive(streamName)) { + log.info("Stream {} is not active yet. 
Checking again in 5 seconds.", streamName); + if (retries > CHECK_RESOURCE_ACTIVE_MAX_RETRIES) { + throw new RuntimeException("Failed stream creation, did not transition into active"); + } + try { + Thread.sleep(TimeUnit.SECONDS.toMillis(5)); + } catch (InterruptedException e) { + log.error("Failed to sleep"); + } + retries++; + } + log.info("Successfully created the stream {}", streamName); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java index d120d95a..52c50e05 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java @@ -6,15 +6,16 @@ import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; public class SubscribeToShardRequestMatcher extends ArgumentMatcher { private SubscribeToShardRequest left; + public SubscribeToShardRequestMatcher(SubscribeToShardRequest left) { super(); this.left = left; } public boolean matches(Object rightObject) { - SubscribeToShardRequest right = (SubscribeToShardRequest)rightObject; - return left.shardId().equals(right.shardId()) && - left.consumerARN().equals(right.consumerARN()) && - left.startingPosition().equals(right.startingPosition()); + SubscribeToShardRequest right = (SubscribeToShardRequest) rightObject; + return left.shardId().equals(right.shardId()) + && left.consumerARN().equals(right.consumerARN()) + && left.startingPosition().equals(right.startingPosition()); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java index bc9ed265..9c776ce8 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java @@ -21,30 +21,28 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; +import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.exceptions.InvalidStateException; import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; import software.amazon.kinesis.exceptions.KinesisClientLibNonRetryableException; import software.amazon.kinesis.exceptions.ShutdownException; import software.amazon.kinesis.exceptions.ThrottlingException; - -import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.leases.ShardSequenceVerifier; +import software.amazon.kinesis.lifecycle.ShutdownReason; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; import software.amazon.kinesis.processor.RecordProcessorCheckpointer; import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.processor.ShutdownNotificationAware; import software.amazon.kinesis.retrieval.KinesisClientRecord; /** * Streamlet that tracks records it's seen - useful for testing. 
*/ @Slf4j -public class TestStreamlet implements ShardRecordProcessor, ShutdownNotificationAware { +public class TestStreamlet implements ShardRecordProcessor { private List records = new ArrayList<>(); private Set processedSeqNums = new HashSet(); // used for deduping @@ -63,9 +61,7 @@ public class TestStreamlet implements ShardRecordProcessor, ShutdownNotification private final CountDownLatch notifyShutdownLatch = new CountDownLatch(1); private final CountDownLatch shutdownLatch = new CountDownLatch(1); - public TestStreamlet() { - - } + public TestStreamlet() {} public TestStreamlet(Semaphore sem, ShardSequenceVerifier shardSequenceVerifier) { this(); @@ -105,8 +101,10 @@ public class TestStreamlet implements ShardRecordProcessor, ShutdownNotification } try { checkpointer.checkpoint(); - } catch (ThrottlingException | ShutdownException - | KinesisClientLibDependencyException | InvalidStateException e) { + } catch (ThrottlingException + | ShutdownException + | KinesisClientLibDependencyException + | InvalidStateException e) { // Continue processing records and checkpoint next time if we get a transient error. // Don't checkpoint if the processor has been shutdown. log.debug("Caught exception while checkpointing: ", e); @@ -141,7 +139,8 @@ public class TestStreamlet implements ShardRecordProcessor, ShutdownNotification @Override public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { - + shutdownNotificationCalled = true; + notifyShutdownLatch.countDown(); } /** @@ -169,12 +168,6 @@ public class TestStreamlet implements ShardRecordProcessor, ShutdownNotification return shutdownNotificationCalled; } - @Override - public void shutdownRequested(RecordProcessorCheckpointer checkpointer) { - shutdownNotificationCalled = true; - notifyShutdownLatch.countDown(); - } - public CountDownLatch getInitializeLatch() { return initializeLatch; } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java index 455e8ed3..3bbeeb18 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java @@ -19,8 +19,8 @@ import java.util.List; import java.util.concurrent.Semaphore; import software.amazon.kinesis.leases.ShardSequenceVerifier; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; /** * Factory for TestStreamlet record processors. @@ -31,7 +31,7 @@ public class TestStreamletFactory implements ShardRecordProcessorFactory { private Semaphore semaphore; private ShardSequenceVerifier shardSequenceVerifier; List testStreamlets = new ArrayList<>(); - + /** * Constructor. 
 */
@@ -61,5 +61,4 @@ public class TestStreamletFactory implements ShardRecordProcessorFactory {
     public List<TestStreamlet> getTestStreamlets() {
         return testStreamlets;
     }
-
 }
diff --git a/checkstyle/checkstyle-suppressions.xml b/checkstyle/checkstyle-suppressions.xml
new file mode 100644
index 00000000..91be49c8
--- /dev/null
+++ b/checkstyle/checkstyle-suppressions.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml
new file mode 100644
index 00000000..6f3df770
--- /dev/null
+++ b/checkstyle/checkstyle.xml
@@ -0,0 +1,49 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/FAQ.md b/docs/FAQ.md
new file mode 100644
index 00000000..1e62f904
--- /dev/null
+++ b/docs/FAQ.md
@@ -0,0 +1,40 @@
+# Frequently Asked Questions (FAQ)
+
+---
+
+## Stream Modality
+
+Questions related to stream modality (e.g., [MultiStreamTracker][multi-stream-tracker]).
+
+### What is the impact of transitioning my KCL app from single-stream to multi-stream?
+
+This answer assumes the [StreamTracker][stream-tracker] implementation is being changed.
+From KCL's perspective, there is no modality change when decreasing a multi-stream tracker from `N` streams to `1` stream.
+
+The DDB `leaseKey`, used to persist metadata including the lease checkpoint, has a modality-dependent format:
+
+| Modality | `leaseKey` Format |
+| --- | --- |
+| single-stream | `<shardId>` |
+| multi-stream | `<accountId>:<streamName>:<streamCreationEpoch>:<shardId>` |
+
+Transitioning an app -- either from single- to multi-, or vice versa -- creates a backwards-incompatible expectation on the `leaseKey`.
+As a result, a KCL app will be blind to any `leaseKey`, and its checkpoint, that does not match the expected format.
+For leases that don't exist in the expected format, processing may start from the default checkpoint (e.g., `LATEST`).
+
+As an example of the potential impact of switching modality, assume `LATEST` is the default initial position in the stream.
+When a KCL application's modality is switched, stream processing will start reading at this initial position, ignoring the checkpoints from the previous modality.
+The impact is that any records written to the stream while the KCL app was being restarted will not be processed.
+If `TRIM_HORIZON` is used instead upon restarting the application, the application will start reading from its initial position, closing the gap in data consumption but potentially consuming records more than once.
+Please make sure that your application can handle either behavior.
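+
+For illustration only, the following Java sketch shows how the same shard yields two different `leaseKey` values under the two modalities (the account id, stream name, and creation epoch below are made up for this example):
+
+```java
+public class LeaseKeyFormatExample {
+    public static void main(String[] args) {
+        String shardId = "shardId-000000000042";
+
+        // Single-stream modality: the leaseKey is just the shard id.
+        String singleStreamLeaseKey = shardId;
+
+        // Multi-stream modality: accountId, streamName, and streamCreationEpoch
+        // are prepended, colon-delimited (hypothetical values shown).
+        String multiStreamLeaseKey = String.join(":",
+                "123456789012", "my-stream", "1702172800", shardId);
+
+        // The keys never match, so a checkpoint stored under one modality is
+        // invisible to a worker querying with the other format.
+        System.out.println(singleStreamLeaseKey);
+        System.out.println(multiStreamLeaseKey);
+    }
+}
+```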
+
+---
+
+## Resources
+
+For additional information, please consider reading:
+* https://docs.aws.amazon.com/streams/latest/dev/kcl-migration.html
+* https://docs.aws.amazon.com/streams/latest/dev/shared-throughput-kcl-consumers.html
+
+[multi-stream-tracker]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java
+[stream-tracker]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java
\ No newline at end of file
diff --git a/docs/images/lease-shard-sync-initialization.png b/docs/images/lease-shard-sync-initialization.png
new file mode 100644
index 00000000..7f524efa
Binary files /dev/null and b/docs/images/lease-shard-sync-initialization.png differ
diff --git a/docs/images/lease-shard-sync-loop.png b/docs/images/lease-shard-sync-loop.png
new file mode 100644
index 00000000..1a62b85b
Binary files /dev/null and b/docs/images/lease-shard-sync-loop.png differ
diff --git a/docs/images/lease-taking.png b/docs/images/lease-taking.png
new file mode 100644
index 00000000..5c05590a
Binary files /dev/null and b/docs/images/lease-taking.png differ
diff --git a/docs/images/leases-and-operations.png b/docs/images/leases-and-operations.png
new file mode 100644
index 00000000..178202ab
Binary files /dev/null and b/docs/images/leases-and-operations.png differ
diff --git a/docs/lease-lifecycle.md b/docs/lease-lifecycle.md
new file mode 100644
index 00000000..8a91109d
--- /dev/null
+++ b/docs/lease-lifecycle.md
@@ -0,0 +1,185 @@
+# Lease Lifecycle
+
+A lease is data that defines the binding between a worker and a shard.
+Distributed KCL consumer applications use leases to partition data record processing across a fleet of workers.
+At any given time, each shard of data records is bound to a particular worker by a lease identified by the `leaseKey` variable.
+
+This document describes the lease lifecycle.
+
+**Note:** shard ids are shortened from `shardId-000000000042` to read as `shardId[_-]42` for readability.
+
+## Leases
+
+In KCL, a lease provides a temporary assignment between one shard and an assigned worker.
+Leases persist for the duration of shard processing (detailed later).
+However, the worker that is processing a lease may change, since [leases may be "stolen"](#lease-balancing) by other workers in the same KCL application.
+
+## Lease Table
+
+To persist metadata about lease state (e.g., [last read checkpoint, current assigned worker][kcl-concepts]), KCL creates a lease table in [DynamoDB][dynamodb].
+Each KCL application will have its own distinct lease table that includes the application name.
+More information, including the schema, is provided at [KCL Lease Table][kcl-leasetable].
+
+## Lease Assignment
+
+Leases are unique to the shard and are not recycled for stream operations (i.e., split, merge).
+A shard created by stream operations will generate a new lease.
+
+![Activity diagram of KCL shard-to-lease assignments.
+shard-0 (lease-0) is unmodified.
+shard-1 (lease-1) is split into shard-4 (lease-4) and shard-5 (lease-5).
+shard-2 (lease-2) and shard-3 (lease-3) are merged into shard-6 (lease-6).
+](images/leases-and-operations.png)
+
+It should be noted that the number of tracked leases may exceed the number of shards.
+Per the diagram (above), this can occur when there are stream operations propagating through KCL.
+For example, a 10-shard stream that is split on every shard may temporarily have up to 30 leases: 10 original + 20 children.
+
+**Note:** Leases are uniquely identified by their `leaseKey`, which looks vastly different from `lease_X`.
+For details on the `leaseKey` format, please see [KCL LeaseTable][kcl-leasetable].
+
+## Lease Lifecycle
+
+Leases follow a relatively simple, progressive state machine:
+`DISCOVERY -> CREATION -> PROCESSING -> SHARD_END -> DELETION`
+
+Excluding `SHARD_END`, these phases are illustrative of KCL logic and are not explicitly codified.
+
+1. `DISCOVERY`: KCL [shard syncing](#shard-syncing) identifies new shards.
+Discovered shards may result from:
+    * Starting KCL for the first time with an empty lease table.
+    * Stream operations (i.e., split, merge) that create child shards.
+    * In multi-stream mode, dynamic discovery of a new stream.
+1. `CREATION`: Leases are created 1:1 for each discovered shard.
+    * Leases are only created if they are eligible for processing.
+    For example, child shards will not have leases created until their parent(s) have reached `SHARD_END`.
+    * Leases are initialized at the configured initial position.
+    * A notable exception is that child leases are initialized at `TRIM_HORIZON` to avoid processing gaps from their parent lease(s).
+1. `PROCESSING`: Leases are processed, and continually updated with new checkpoints.
+    * In general, leases spend the majority of their life in this state.
+1. `SHARD_END`: The associated shard has reached `SHARD_END`, and all of its records have been processed by KCL.
+1. `DELETION`: Since there are no more records to process, KCL will delete the lease from the lease table.
+    * Lease deletion will not occur until after its child lease(s) enter `PROCESSING`.
+    * This tombstone helps KCL ensure durability and convergence for all discovered leases.
+    * For more information, see [LeaseCleanupManager#cleanupLeaseForCompletedShard(...)][lease-cleanup-manager-cleanupleaseforcompletedshard][^fixed-commit-footnote]
+    * [Deletion is configurable][lease-cleanup-config], and is recommended to minimize the I/O of lease table scans.
+
+### Shard Syncing
+
+Shard syncing is a complex responsibility owned by the leader host in a KCL application.
+By invoking the [ListShards API][list-shards], KCL will identify the shards for the configured stream(s).
+This process runs at a [configurable interval][lease-auditor-config] to determine whether a shard sync should be executed to identify new shards.
+A shard sync is not guaranteed to identify new shards (e.g., when KCL has already discovered all existing shards).
+
+The following diagram is an abridged sequence diagram of key classes that initialize the shard sync workflow:
+![Abridged sequence diagram of the Shard Sync initialization process.
+Listed participants are the Scheduler, LeaseCoordinator, PeriodicShardSyncManager, and Lease Table (DynamoDB).
+Scheduler initializes the LeaseCoordinator which, in turn, creates the lease table if it does not exist.
+Finally, Scheduler starts the PeriodicShardSyncManager which schedules itself to execute every leasesRecoveryAuditorExecutionFrequencyMillis.
+](images/lease-shard-sync-initialization.png)
+
+The following diagram outlines the key classes involved in the shard sync workflow:
+![Abridged sequence diagram of the Shard Sync main processing loop.
+Listed participants are the PeriodicShardSyncManager, ShardSyncTask, ShardDetector,
+HierarchicalShardSyncer, LeaseRefresher, LeaseSynchronizer, and Lease Table (DynamoDB).
+On each iteration, PeriodicShardSyncManager determines whether it's the leader and a shard sync is required before proceeding.
+PeriodicShardSyncManager calls ShardSyncTask, which calls HierarchicalShardSyncer, which acquires the shard lists from ShardDetector.
+HierarchicalShardSyncer then invokes LeaseRefresher to scan the lease table, and uses those returned leases to identify shards which do not have leases.
+Finally, HierarchicalShardSyncer uses LeaseRefresher to create any new leases in the lease table.
+](images/lease-shard-sync-loop.png)
+
+For more information, here are the links to KCL code:
+* `Scheduler`: [implementation][scheduler]
+* `LeaseCoordinator`: [interface][lease-coordinator], [implementation][lease-coordinator-impl]
+* `PeriodicShardSyncManager`: [implementation][periodic-shard-sync-manager]
+* `ShardSyncTask`: [interface][consumer-task], [implementation][consumer-task-impl]
+* `ShardDetector`: [interface][shard-detector], [implementation][shard-detector-impl]
+* `HierarchicalShardSyncer`: [implementation][hierarchical-shard-syncer]
+* `LeaseRefresher`: [interface][lease-refresher], [implementation][lease-refresher-impl]
+* `LeaseSynchronizer`: [implementation][non-empty-lease-table-synchronizer]
+
+Lease creation is a deterministic process; the following example illustrates how KCL operates.
+Assume a stream has the following shard hierarchy:
+<pre>
    +Shard structure (each level depicts a stream segment):
    +  0 1 2 3 4   5    - shards till epoch 102
    +  \ / \ / |   |
    +   6   7  4   5    - shards from epoch 103 - 205
    +   \  /   |  / \
    +     8    4 9  10  - shards from epoch 206+ (still open)
+</pre>
+
+Then [NonEmptyLeaseTableSynchronizer][non-empty-lease-table-synchronizer]
+would create leases depending on the configured initial position.
+Assuming leases `(4, 5, 7)` already exist, the leases created for an initial position would be:
+* `LATEST` creates `(6)` to resolve the gap covering epochs 103-205, which is required to eventually reach `LATEST`
+* `TRIM_HORIZON` creates `(0, 1)` to resolve the gap starting from the `TRIM_HORIZON`
+* `AT_TIMESTAMP(epoch=200)` creates `(0, 1)` to resolve the gap leading into epoch 200
+
+#### Avoiding a Shard Sync
+
+To reduce Kinesis Data Streams API calls, KCL will attempt to avoid unnecessary shard syncs.
+For example, if the discovered shards cover the entire partition range, then a shard sync is unlikely to yield a material difference.
+For more information, see
+[PeriodicShardSyncManager#checkForShardSync(...)][periodic-shard-sync-manager-checkforshardsync][^fixed-commit-footnote].
+
+## Lease Balancing
+
+KCL balances leases across workers at an interval configured by `leaseDuration` and `epsilonMillis`.
+Lease balancing is done to protect against interruptions in processing should a worker stop updating the lease table (e.g., host failure).
+This operation only accounts for lease assignments and does not factor in I/O load.
+For example, leases that are equally distributed across KCL workers are not guaranteed to have equal I/O distribution.
+
+![Sequence diagram of the KCL Lease Taking workflow.
+Participants include the LeaseCoordinator, LeaseTaker, LeaseRefresher, and Lease Table (DynamoDB).
+LeaseRefresher is leveraged to acquire the leases from the lease table.
+LeaseTaker identifies which leases are eligible for taking/stealing.
+All taken/stolen leases are passed through LeaseRefresher to update the lease table.
+](images/lease-taking.png)
+
+For more information, here are the links to KCL code:
+* `LeaseCoordinator`: [interface][lease-coordinator], [implementation][lease-coordinator-impl]
+* `LeaseTaker`: [interface][lease-taker], [implementation][lease-taker-impl]
+* `LeaseRefresher`: [interface][lease-refresher], [implementation][lease-refresher-impl]
+
+Leases are stolen if, and only if, there are zero expired leases and the worker looking to steal desires more leases.
+Stolen leases are randomly selected from whichever worker has the most leases.
+The maximum number of leases to steal on each loop is configured via [maxLeasesToStealAtOneTime][max-leases-to-steal-config].
+A sketch of this arithmetic follows the reading list below.
+
+Customers should consider the following trade-offs when configuring the lease-taking cadence:
+1. `LeaseRefresher` invokes a DynamoDB `scan` against the lease table, which has a cost proportional to the number of leases.
+1. Frequent balancing may cause high lease turnover, which incurs DynamoDB `write` costs and potentially redundant work for stolen leases.
+1. A low `maxLeasesToStealAtOneTime` may increase the time to fully (re)assign leases after an impactful event (e.g., deployment, host failure).
+
+# Additional Reading
+
+Recommended reading:
+* [Processing Amazon DynamoDB Streams Using the Amazon Kinesis Client Library](https://aws.amazon.com/blogs/big-data/processing-amazon-dynamodb-streams-using-the-amazon-kinesis-client-library/)
+
+[^fixed-commit-footnote]: This link is a point-in-time reference to a specific commit to guarantee static line numbers.
+    This code reference is not guaranteed to remain consistent with the `master` branch.
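+
+As a rough, illustration-only sketch of the lease-stealing arithmetic described under [Lease Balancing](#lease-balancing) (hypothetical names and simplified math; this is not the actual `DynamoDBLeaseTaker` implementation, which also weighs expired leases and other factors):
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+
+public class LeaseStealSketch {
+    /**
+     * Estimate how many leases this worker may steal in one take-cycle.
+     * Assumes lease counts were gathered from a lease table scan.
+     */
+    static int leasesToSteal(Map<String, Integer> leaseCountsByWorker,
+                             String thisWorker,
+                             int maxLeasesToStealAtOneTime) {
+        int totalLeases = leaseCountsByWorker.values().stream().mapToInt(Integer::intValue).sum();
+        // Even share, rounded up so that every lease has an owner.
+        int target = (int) Math.ceil((double) totalLeases / leaseCountsByWorker.size());
+        int mine = leaseCountsByWorker.getOrDefault(thisWorker, 0);
+        // Stealing is capped per loop to limit churn.
+        return Math.min(Math.max(0, target - mine), maxLeasesToStealAtOneTime);
+    }
+
+    public static void main(String[] args) {
+        Map<String, Integer> counts = new HashMap<>();
+        counts.put("worker-A", 6);
+        counts.put("worker-B", 2);
+        // worker-B is 2 leases under its even share of 4, but may steal at most 1 per loop.
+        System.out.println(leasesToSteal(counts, "worker-B", 1)); // prints 1
+    }
+}
+```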
+
+[consumer-task]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
+[consumer-task-impl]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java
+[dynamodb]: https://aws.amazon.com/dynamodb/
+[hierarchical-shard-syncer]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java
+[kcl-concepts]: https://docs.aws.amazon.com/streams/latest/dev/shared-throughput-kcl-consumers.html#shared-throughput-kcl-consumers-concepts
+[kcl-leasetable]: https://docs.aws.amazon.com/streams/latest/dev/shared-throughput-kcl-consumers.html#shared-throughput-kcl-consumers-leasetable
+[lease-auditor-config]: https://github.com/awslabs/amazon-kinesis-client/blob/3d6800874cdc5e4c18df6ea0197f607f6298cab7/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java#L204-L209
+[lease-cleanup-config]: https://github.com/awslabs/amazon-kinesis-client/blob/3d6800874cdc5e4c18df6ea0197f607f6298cab7/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java#L112-L128
+[lease-cleanup-manager-cleanupleaseforcompletedshard]: https://github.com/awslabs/amazon-kinesis-client/blob/3d6800874cdc5e4c18df6ea0197f607f6298cab7/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java#L263-L294
+[lease-coordinator]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java
+[lease-coordinator-impl]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java
+[lease-refresher]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java
+[lease-refresher-impl]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java
+[lease-taker]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java
+[lease-taker-impl]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java
+[list-shards]: https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListShards.html
+[max-leases-to-steal-config]: https://github.com/awslabs/amazon-kinesis-client/blob/3d6800874cdc5e4c18df6ea0197f607f6298cab7/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java#L142-L149
+[non-empty-lease-table-synchronizer]: https://github.com/awslabs/amazon-kinesis-client/blob/master/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java#L857-L910
+[periodic-shard-sync-manager]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java
+[periodic-shard-sync-manager-checkforshardsync]: https://github.com/awslabs/amazon-kinesis-client/blob/3d6800874cdc5e4c18df6ea0197f607f6298cab7/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java#L267-L300
+[scheduler]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java
+[shard-detector]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java
+[shard-detector-impl]: /amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java
diff --git a/docs/multilang/configuring-credential-providers.md b/docs/multilang/configuring-credential-providers.md
new file mode 100644
index 00000000..9b85baaa
--- /dev/null
+++ b/docs/multilang/configuring-credential-providers.md
@@ -0,0 +1,71 @@
+# Configuring Credential Providers
+
+[AwsCredentialProviders][aws-credentials-provider] are not one-size-fits-all.
+The AWS SDK provides a rich API to support various configurations for many different providers.
+KCL multilang does not, and is not intended to, proxy the full breadth of the AWS SDK.
+However, KCL now provides better extensibility to handle, and be enhanced to handle, additional configurations.
+This document should help multilang customers configure a suitable `CredentialProvider` (or contribute changes to support a new use case!).
+
+## Sample Provider Configuration
+
+In a Properties file, an `AWSCredentialsProperty` configuration might look like:
+```
+AWSCredentialsProvider = STSAssumeRoleSessionCredentialsProvider|<arn>|<sessionName>
+```
+This basic configuration creates an [STSAssumeRoleSessionCredentialsProvider][sts-assume-provider] with an ARN and session name.
+While functional, this configuration is limited.
+For example, this configuration cannot set a regional endpoint (e.g., VPC use case).
+
+Leveraging nested properties, an `AWSCredentialsProperty` value might change to:
+```
+AWSCredentialsProvider = KclSTSAssumeRoleSessionCredentialsProvider|<arn>|<sessionName>\
+    |endpointRegion=us-east-1|externalId=spartacus
+```
+N.B. Backslash (`\`) is for multi-line legibility and is not required.
+
+## Nested Properties
+
+KCL multilang supports "nested properties" on the `AWSCredentialsProvider` key in the properties file.
+The [Backus-Naur form][bnf] of the value:
+```
+<value> ::= <class_name> ["|" <required_arg>]* ["|" <nested_property>]*
+<class_name> ::= <simple_class_name> | <fully_qualified_class_name>
+<required_arg> ::= <string>  # this depends on the provider
+<nested_property> ::= <nested_key> "=" <nested_value>
+<nested_value> ::= <string>
+    # this depends on the nested key
+```
+
+In general, required parameters are passed directly to the class's constructor
+(e.g., [STSAssumeRoleSessionCredentialsProvider(String, String)][sts-assume-provider-constructor]).
+
+Nested properties are a custom mapping provided by KCL multilang, and do not exist in the AWS SDK.
+See [NestedPropertyKey][nested-property-key] for the supported keys, and details on their expected values.
+
+## Nested Property Processor
+
+Nested keys are processed via [NestedPropertyProcessor][nested-property-processor].
+Implementation details depend on the implementing class.
+Adding a new nested key should be trivial.
+A backwards-compatible addition might look like:
+```
+    default void acceptFoo(...) {
+        // do nothing
+    }
+```
+
+### KclSTSAssumeRoleSessionCredentialsProvider
+
+KCL multilang includes a [custom nested property processor for `STSAssumeRole`][kcl-sts-provider].
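+
+For a concrete feel of the pattern, here is an illustration-only sketch (hypothetical names; the real interfaces are [NestedPropertyKey][nested-property-key] and [NestedPropertyProcessor][nested-property-processor]) of how nested `key=value` tokens might be dispatched to accept methods:
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+
+public class NestedPropertySketch {
+    interface Processor {
+        void acceptEndpointRegion(String region);
+
+        // Default no-op keeps existing implementations source-compatible
+        // when a new nested key is introduced.
+        default void acceptExternalId(String externalId) {
+            // do nothing
+        }
+    }
+
+    public static void main(String[] args) {
+        Map<String, String> applied = new HashMap<>();
+        Processor processor = new Processor() {
+            @Override
+            public void acceptEndpointRegion(String region) {
+                applied.put("endpointRegion", region);
+            }
+
+            @Override
+            public void acceptExternalId(String externalId) {
+                applied.put("externalId", externalId);
+            }
+        };
+
+        // Nested properties trail the required params, '|'-delimited.
+        for (String token : "endpointRegion=us-east-1|externalId=spartacus".split("\\|")) {
+            String[] kv = token.split("=", 2);
+            if ("endpointRegion".equals(kv[0])) {
+                processor.acceptEndpointRegion(kv[1]);
+            } else if ("externalId".equals(kv[0])) {
+                processor.acceptExternalId(kv[1]);
+            } // unknown keys are ignored in this sketch
+        }
+        System.out.println(applied); // e.g., {endpointRegion=us-east-1, externalId=spartacus}
+    }
+}
+```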
+Multilang configurations that use `STSAssumeRoleSessionCredentialsProvider` need only prefix the class name with `Kcl` to exercise this new provider:
+```
+AWSCredentialsProvider = KclSTSAssumeRoleSessionCredentialsProvider|<arn>|<sessionName>
+```
+
+[aws-credentials-provider]: https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/auth/credentials/AwsCredentialsProvider.html
+[bnf]: https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form
+[kcl-sts-provider]: /amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java
+[nested-property-key]: /amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java
+[nested-property-processor]: /amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java
+[sts-assume-provider]: https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/STSAssumeRoleSessionCredentialsProvider.html
+[sts-assume-provider-constructor]: https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/STSAssumeRoleSessionCredentialsProvider.html#STSAssumeRoleSessionCredentialsProvider-java.lang.String-java.lang.String-
diff --git a/docs/plantuml/lease-shard-sync-initialization.puml b/docs/plantuml/lease-shard-sync-initialization.puml
new file mode 100644
index 00000000..b4aa2de3
--- /dev/null
+++ b/docs/plantuml/lease-shard-sync-initialization.puml
@@ -0,0 +1,23 @@
+@startuml
+'https://plantuml.com/sequence-diagram
+
+autonumber
+
+title KCL Shard Syncing Initialization (Abridged)
+
+participant Scheduler as S
+participant LeaseCoordinator as LC
+participant PeriodicShardSyncManager as PSS
+participant "Lease Table\n(DynamoDB)" as DDB
+
+alt on initialization
+    S->S: create PeriodicShardSyncManager(\n ..., leaseRefresher, leasesRecoveryAuditorExecutionFrequencyMillis, ...)
+    S->LC: initialize()
+    opt if lease table does not exist
+        LC->DDB: create lease table
+    end
+    S->PSS: start()
+    PSS->PSS: schedule self every\n leasesRecoveryAuditorExecutionFrequencyMillis
+end
+
+@enduml
diff --git a/docs/plantuml/lease-shard-sync-loop.puml b/docs/plantuml/lease-shard-sync-loop.puml
new file mode 100644
index 00000000..6c0b9a63
--- /dev/null
+++ b/docs/plantuml/lease-shard-sync-loop.puml
@@ -0,0 +1,52 @@
+@startuml
+'https://plantuml.com/sequence-diagram
+
+autonumber
+
+title KCL Shard Syncing Main Loop (Abridged)
+
+participant PeriodicShardSyncManager as PSS
+participant ShardSyncTask as SST
+participant ShardDetector as SD
+participant HierarchicalShardSyncer as HSS
+participant LeaseRefresher as LR
+participant LeaseSynchronizer as LS
+participant "Lease Table\n(DynamoDB)" as DDB
+
+loop every leasesRecoveryAuditorExecutionFrequencyMillis
+    opt if worker is not leader
+        PSS->PSS: go back to sleep
+    end
+
+    PSS->PSS: runShardSync()
+    opt if not required to sync shards
+        PSS->PSS: go back to sleep
+    end
+
+    PSS->SST: call()
+    SST->HSS: checkAndCreateLeasesForNewShards(\n shardDetector, initialPosition, ...)
+    opt if lease table is empty
+        HSS->HSS: getShardListAtInitialPosition(\n shardDetector, initialPosition)
+        HSS->SD: listShardsWithFilter(initialPositionFilter)
+    else lease table is non-empty
+        HSS->HSS: getShardList(shardDetector)
+        HSS->SD: listShards(...)
+        note over SD
+            ShardDetector invokes the
+            Kinesis Data Streams ListShards API.
+        end note
+    end
+    HSS->HSS: checkAndCreateLeaseForNewShards(\n shardDetector, leaseRefresher,\n initialPosition, ...)
+    HSS->LR: listLeases()
+    LR->DDB: scan(:streamName=streamName)
+    DDB->LR: leases from table
+    LR->HSS: leases from table
+    HSS->LS: determine leases to create
+    LS->HSS: leases that are eligible for processing
+    loop every lease to create
+        HSS->LR: createLeaseIfNotExists(lease)
+        LR->DDB: putItem(lease)
+    end
+end
+
+@enduml
diff --git a/docs/plantuml/lease-taking.puml b/docs/plantuml/lease-taking.puml
new file mode 100644
index 00000000..a49de758
--- /dev/null
+++ b/docs/plantuml/lease-taking.puml
@@ -0,0 +1,33 @@
+@startuml
+'https://plantuml.com/sequence-diagram
+
+autonumber
+
+title KCL Lease Taking
+
+participant LeaseCoordinator as LC
+participant LeaseTaker as LT
+participant LeaseRefresher as LR
+participant "Lease Table\n(DynamoDB)" as DDB
+
+loop every 2*(leaseDurationMillis + epsilonMillis)
+    LC->LT: takeLeases()
+    LT->LT: updateAllLeases()
+    LT->LR: listLeases()
+    LR->DDB: scan(:streamName=streamName)
+    DDB->LR: leases from table
+    LR->LT: leases from table
+    LT->LT: evict from memory any lease not returned from table
+    LT->LT: getExpiredLeases()
+    note over LT
+        Leases are "expired" if their last scan time exceeds the max lease duration.
+    end note
+    LT->LT: calculate lease count per host
+    LT->LT: chooseLeasesToSteal()
+    loop for each lease to take/steal
+        LT->LR: takeLease(lease, worker)
+        LR->DDB: updateItem(lease)
+    end
+end
+
+@enduml
\ No newline at end of file
diff --git a/docs/plantuml/leases-and-operations.puml b/docs/plantuml/leases-and-operations.puml
new file mode 100644
index 00000000..d865fa6b
--- /dev/null
+++ b/docs/plantuml/leases-and-operations.puml
@@ -0,0 +1,35 @@
+@startuml
+'https://plantuml.com/activity-diagram-legacy
+
+title KCL Shard<->Lease Assignments
+
+[*] --> ShardNoOp
+[*] --> ShardSplit
+[*] --> ShardMerge
+ShardNoOp --> [*]
+ShardSplit --> [*]
+ShardMerge --> [*]
+
+state ShardNoOp {
+    shard_0 : lease_0
+}
+
+state ShardSplit {
+    shard_1 : lease_1
+    shard_4 : lease_4
+    shard_5 : lease_5
+
+    shard_1 --> shard_4 : split
+    shard_1 --> shard_5 : split
+}
+
+state ShardMerge {
+    shard_2 : lease_2
+    shard_3 : lease_3
+    shard_6 : lease_6
+
+    shard_2 --> shard_6 : merge
+    shard_3 --> shard_6 : merge
+}
+
+@enduml
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index d7b6d42a..e9a9bcb0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -22,7 +22,7 @@
     amazon-kinesis-client-pom
     pom
     Amazon Kinesis Client Library
-    ${revision}
+    2.6.1-SNAPSHOT
     The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data from Amazon Kinesis.
 
@@ -33,8 +33,8 @@
-        2.4.6-SNAPSHOT
-        2.19.31
+        2.25.64
+        UTF-8
 
@@ -73,18 +73,60 @@
+
+            org.apache.maven.plugins
+            maven-checkstyle-plugin
+            3.3.1
+
+                checkstyle/checkstyle.xml
+                true
+                true
+                true
+                checkstyle/checkstyle-suppressions.xml
+
+
+                validate
+
+                    check
+
+
+
             org.apache.maven.plugins
             maven-compiler-plugin
-            3.8.1
+            3.13.0
 
-                1.8
-                1.8
+                8
                 UTF-8
 
+
+            com.diffplug.spotless
+            spotless-maven-plugin
+            2.30.0
+
+
+
+                    java,,\#
+
+
+
+
+                check
+
+                compile
+
+
@@ -97,7 +139,7 @@
             org.apache.maven.plugins
             maven-gpg-plugin
-            3.0.1
+            3.2.1
 
                 sign-artifacts
@@ -111,7 +153,7 @@
             org.sonatype.plugins
             nexus-staging-maven-plugin
-            1.6.8
+            1.6.13
             true
             sonatype-nexus-staging