Merge branch 'update-2023-08-18' into latency-issue
commit 946d1f16c5
163 changed files with 6003 additions and 2826 deletions
6 .github/dependabot.yml (vendored)

@@ -2,12 +2,18 @@ version: 2
updates:
  - package-ecosystem: "maven"
    directory: "/"
    labels:
      - "dependencies"
      - "v2.x"
    schedule:
      interval: "weekly"

  # branch - v1.x
  - package-ecosystem: "maven"
    directory: "/"
    labels:
      - "dependencies"
      - "v1.x"
    target-branch: "v1.x"
    schedule:
      interval: "weekly"
32 .github/workflows/maven.yml (vendored, new file)

@@ -0,0 +1,32 @@
# This workflow will build a Java project with Maven, and cache/restore any dependencies to improve the workflow execution time
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-java-with-maven

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Java CI with Maven

on:
  push:
    branches:
      - "master"
  pull_request:
    branches:
      - "master"

jobs:
  build:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
      - name: Set up JDK 8
        uses: actions/setup-java@v3
        with:
          java-version: '8'
          distribution: 'corretto'
      - name: Build with Maven
        run: mvn -B package --file pom.xml -DskipITs
2 .gitignore (vendored)

@@ -4,3 +4,5 @@ AwsCredentials.properties
*.iml
.sdkmanrc
.vscode
*.swp
.DS_Store
BIN .log.swp
Binary file not shown.
380 CHANGELOG.md

@@ -1,5 +1,94 @@
# Changelog

For **1.x** release notes, please see [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md)

---

### Release 2.5.2 (2023-08-07)
* [#1184](https://github.com/awslabs/amazon-kinesis-client/pull/1184) [#367] Enhanced multi-lang `AWSCredentialsProvider=...` decoder and c…
* [#1186](https://github.com/awslabs/amazon-kinesis-client/pull/1186) Provided documentation for multilang's new NestedPropertyKey enhancement.
* [#1181](https://github.com/awslabs/amazon-kinesis-client/pull/1181) CVE-2023-2976: Upgrade Google Guava dependency version from `32.0.0-jre` to `32.1.1-jre`
* [#1159](https://github.com/awslabs/amazon-kinesis-client/pull/1159) Bug fix in lease refresher integration test with occasional failures
* [#1157](https://github.com/awslabs/amazon-kinesis-client/pull/1157) Fix NPE on graceful shutdown before DDB `LeaseCoordinator` starts.
* [#1152](https://github.com/awslabs/amazon-kinesis-client/pull/1152) Adding resharding integration tests and changing ITs to not run by default
* [#1162](https://github.com/awslabs/amazon-kinesis-client/pull/1162) Only deleting resources created by ITs
* [#1158](https://github.com/awslabs/amazon-kinesis-client/pull/1158) Checkstyle: tightened `LineLength` restriction from 170 to 150.
* [#1151](https://github.com/awslabs/amazon-kinesis-client/pull/1151) Modified `dependabot.yml` to set the correct `v[1|2].x` label.
* [#1164](https://github.com/awslabs/amazon-kinesis-client/pull/1164) Upgraded KCL version from 2.5.1 to 2.5.2-SNAPSHOT

### Release 2.5.1 (June 27, 2023)
* [#1143](https://github.com/awslabs/amazon-kinesis-client/pull/1143) Upgrade MultiLangDaemon to support StreamARN
* [#1145](https://github.com/awslabs/amazon-kinesis-client/pull/1145) Introduced GitHub actions to trigger Maven builds during merge/pull requests
* [#1136](https://github.com/awslabs/amazon-kinesis-client/pull/1136) Added testing architecture and KCL 2.x basic polling/streaming tests
* [#1153](https://github.com/awslabs/amazon-kinesis-client/pull/1153) Checkstyle: added `UnusedImports` check.
* [#1150](https://github.com/awslabs/amazon-kinesis-client/pull/1150) Enabled Checkstyle validation of test resources.
* [#1149](https://github.com/awslabs/amazon-kinesis-client/pull/1149) Bound Checkstyle to `validate` goal for automated enforcement.
* [#1148](https://github.com/awslabs/amazon-kinesis-client/pull/1148) Code cleanup to facilitate Checkstyle enforcement.
* [#1142](https://github.com/awslabs/amazon-kinesis-client/pull/1142) Upgrade Google Guava dependency version from 31.1-jre to 32.0.0-jre
* [#1115](https://github.com/awslabs/amazon-kinesis-client/pull/1115) Update KCL version from 2.5.0 to 2.5.1-SNAPSHOT

### Release 2.5.0 (May 19, 2023)
* **[#1109](https://github.com/awslabs/amazon-kinesis-client/pull/1109) Add support for stream ARNs**
* **[#1065](https://github.com/awslabs/amazon-kinesis-client/pull/1065) Allow tags to be added when lease table is created**
* [#1094](https://github.com/awslabs/amazon-kinesis-client/pull/1094) Code cleanup to introduce better testing
* [#1088](https://github.com/awslabs/amazon-kinesis-client/pull/1088) Minimize race in PSSM to optimize shard sync calls
* [#1086](https://github.com/awslabs/amazon-kinesis-client/pull/1086) Add additional SingleStreamTracker constructor with stream position parameter
* [#1084](https://github.com/awslabs/amazon-kinesis-client/pull/1084) More consistent testing behavior with restartAfterRequestTimerExpires
* [#1066](https://github.com/awslabs/amazon-kinesis-client/pull/1066) More consistent testing behavior with HashRangesAreAlwaysComplete
* [#1072](https://github.com/awslabs/amazon-kinesis-client/pull/1072) Upgrade nexus-staging-maven-plugin from 1.6.8 to 1.6.13
* [#1073](https://github.com/awslabs/amazon-kinesis-client/pull/1073) Upgrade slf4j-api from 2.0.6 to 2.0.7
* [#1090](https://github.com/awslabs/amazon-kinesis-client/pull/1090) Upgrade awssdk.version from 2.20.8 to 2.20.43
* [#1071](https://github.com/awslabs/amazon-kinesis-client/pull/1071) Upgrade maven-compiler-plugin from 3.8.1 to 3.11.0

### Release 2.4.8 (March 21, 2023)
* [#1080](https://github.com/awslabs/amazon-kinesis-client/pull/1080) Added metric in `ShutdownTask` for scenario when parent leases are missing.
* [#1077](https://github.com/awslabs/amazon-kinesis-client/pull/1077) Reverted changes to pom property
* [#1069](https://github.com/awslabs/amazon-kinesis-client/pull/1069) Fixed flaky InitializationWaitsWhenLeaseTableIsEmpty test

### Release 2.4.7 (March 17, 2023)
* **NOTE: Due to an issue during the release process, the 2.4.7 published artifacts are incomplete and non-viable. Please use 2.4.8 or later.**
* [#1063](https://github.com/awslabs/amazon-kinesis-client/pull/1063) Allow leader to learn new leases upon re-election to avoid unnecessary shardSyncs
* [#1060](https://github.com/awslabs/amazon-kinesis-client/pull/1060) Add new metric to be emitted on lease creation
* [#1057](https://github.com/awslabs/amazon-kinesis-client/pull/1057) Added more logging in `Scheduler` w.r.t. `StreamConfig`s.
* [#1059](https://github.com/awslabs/amazon-kinesis-client/pull/1059) DRY: simplification of `HierarchicalShardSyncerTest`.
* [#1062](https://github.com/awslabs/amazon-kinesis-client/pull/1062) Fixed retry storm in `PrefetchRecordsPublisher`.
* [#1061](https://github.com/awslabs/amazon-kinesis-client/pull/1061) Fixed NPE in `LeaseCleanupManager`.
* [#1056](https://github.com/awslabs/amazon-kinesis-client/pull/1056) Clean up in-memory state of deleted kinesis stream in MultiStreamMode
* [#1058](https://github.com/awslabs/amazon-kinesis-client/pull/1058) Documentation: added `<pre>` tags so fixed-format diagrams aren't garbled.
* [#1053](https://github.com/awslabs/amazon-kinesis-client/pull/1053) Exposed convenience method of `ExtendedSequenceNumber#isSentinelCheckpoint()`
* [#1043](https://github.com/awslabs/amazon-kinesis-client/pull/1043) Removed a `.swp` file, and updated `.gitignore`.
* [#1047](https://github.com/awslabs/amazon-kinesis-client/pull/1047) Upgrade awssdk.version from 2.19.31 to 2.20.8
* [#1046](https://github.com/awslabs/amazon-kinesis-client/pull/1046) Upgrade maven-javadoc-plugin from 3.3.1 to 3.5.0
* [#1038](https://github.com/awslabs/amazon-kinesis-client/pull/1038) Upgrade gsr.version from 1.1.13 to 1.1.14
* [#1037](https://github.com/awslabs/amazon-kinesis-client/pull/1037) Upgrade aws-java-sdk.version from 1.12.370 to 1.12.405

### Release 2.4.6 (February 21, 2023)
* [#1041](https://github.com/awslabs/amazon-kinesis-client/pull/1041) Minor optimizations (e.g., calculate-once, put instead of get+put)
* [#1035](https://github.com/awslabs/amazon-kinesis-client/pull/1035) Release Note updates to avoid duplication and bitrot (e.g., 1.x release
* [#935](https://github.com/awslabs/amazon-kinesis-client/pull/935) Pass isAtShardEnd correctly to processRecords call
* [#1040](https://github.com/awslabs/amazon-kinesis-client/pull/1040) Increased logging verbosity around lease management
* [#1024](https://github.com/awslabs/amazon-kinesis-client/pull/1024) Added logging w.r.t. StreamConfig handling.
* [#1034](https://github.com/awslabs/amazon-kinesis-client/pull/1034) Optimization: 9~15% improvement in KinesisDataFetcher wall-time
* [#1045](https://github.com/awslabs/amazon-kinesis-client/pull/1045) Fixed duplication of project version in children pom.xml
* [#956](https://github.com/awslabs/amazon-kinesis-client/pull/956) Fixed warning message typos
* [#795](https://github.com/awslabs/amazon-kinesis-client/pull/795) Fixed log message spacing
* [#740](https://github.com/awslabs/amazon-kinesis-client/pull/740) Fixed typo in Comment
* [#1028](https://github.com/awslabs/amazon-kinesis-client/pull/1028) Refactored MultiStreamTracker to provide and enhance OOP for both
* [#1027](https://github.com/awslabs/amazon-kinesis-client/pull/1027) Removed CHECKSTYLE:OFF toggles which can invite/obscure sub-par code.
* [#1032](https://github.com/awslabs/amazon-kinesis-client/pull/1032) Upgrade rxjava from 3.1.5 to 3.1.6
* [#1030](https://github.com/awslabs/amazon-kinesis-client/pull/1030) Upgrade awssdk.version from 2.19.2 to 2.19.31
* [#1029](https://github.com/awslabs/amazon-kinesis-client/pull/1029) Upgrade slf4j-api from 2.0.0 to 2.0.6
* [#1015](https://github.com/awslabs/amazon-kinesis-client/pull/1015) Upgrade protobuf-java from 3.21.5 to 3.21.12

### Release 2.4.5 (January 04, 2023)
* [#1014](https://github.com/awslabs/amazon-kinesis-client/pull/1014) Use AFTER_SEQUENCE_NUMBER iterator type for expired iterator request

### Release 2.4.4 (December 23, 2022)
* [#1017](https://github.com/awslabs/amazon-kinesis-client/pull/1017) Upgrade aws sdk
  * aws-java-sdk.version from 1.12.296 -> 1.12.370
  * awssdk.version from 2.17.268 -> 2.19.2
* [#1020](https://github.com/awslabs/amazon-kinesis-client/pull/1020) Correct the KCL version in the main pom

### Release 2.4.3 (September 6, 2022)
* [#980](https://github.com/awslabs/amazon-kinesis-client/pull/980) logback-classic: 1.2.9 -> 1.4.0
* [#983](https://github.com/awslabs/amazon-kinesis-client/pull/983)

@@ -472,297 +561,6 @@ Suppression can be configured by setting `LifecycleConfig#readTimeoutsToIgnoreBe
* MultiLangDaemon is now a separate module

  The MultiLangDaemon has been separated into its own Maven module and is no longer available in `amazon-kinesis-client`. To include the MultiLangDaemon, add a dependency on `amazon-kinesis-client-multilang`.

## Release 1.9.1 (April 30, 2018)
* Added the ability to create a prepared checkpoint when at `SHARD_END`.
  * [PR #301](https://github.com/awslabs/amazon-kinesis-client/pull/301)
* Added the ability to subscribe to worker state change events.
  * [PR #291](https://github.com/awslabs/amazon-kinesis-client/pull/291)
* Added support for custom lease managers.

  A custom `LeaseManager` can be provided to `Worker.Builder` that will be used to provide lease services. This makes it possible to implement custom lease management systems in addition to the default DynamoDB system.
  * [PR #297](https://github.com/awslabs/amazon-kinesis-client/pull/297)
* Updated the version of the AWS Java SDK to 1.11.219

## Release 1.9.0 (February 6, 2018)
* Introducing support for ListShards API. This API is used in place of DescribeStream API to provide more throughput during ShardSyncTask. Please consult the [AWS Documentation for ListShards](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListShards.html) for more information.
  * ListShards supports a higher call rate, which should reduce instances of throttling when attempting to synchronize the shard list.
  * __WARNING: `ListShards` is a new API, and may require updating any explicit IAM policies__
  * Added configuration parameters for ListShards usage:

    | Name | Default | Description |
    | ---- | ------- | ----------- |
    | [listShardsBackoffTimeInMillis](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1412) | 1500 ms | This is the default backoff time between 2 ListShards calls when throttled. |
    | [listShardsRetryAttempts](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1423) | 50 | This is the maximum number of times the KinesisProxy will retry to make ListShards calls on being throttled. |

* Updating the version of the AWS Java SDK to 1.11.272.
  * Version 1.11.272 is now the minimum supported version of the SDK.
* Deprecating the following methods and classes. These methods and classes will be removed in a future release.
  * Deprecated [IKinesisProxy#getStreamInfo](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxy.java#L48-L62).
  * Deprecated [IKinesisProxyFactory](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyFactory.java).
  * Deprecated [KinesisProxyFactory](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyFactory.java).
  * Deprecated certain [KinesisProxy](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxy.java) constructors.
  * [PR #293](https://github.com/awslabs/amazon-kinesis-client/pull/293)

## Release 1.8.10
* Allow providing a custom IKinesisProxy implementation.
  * [PR #274](https://github.com/awslabs/amazon-kinesis-client/pull/274)
* Checkpointing on a different thread should no longer emit a warning about NullMetricsScope.
  * [PR #284](https://github.com/awslabs/amazon-kinesis-client/pull/284)
  * [Issue #48](https://github.com/awslabs/amazon-kinesis-client/issues/48)
* Upgraded the AWS Java SDK to version 1.11.271
  * [PR #287](https://github.com/awslabs/amazon-kinesis-client/pull/287)

## Release 1.8.9
* Allow disabling check for the case where a child shard has an open parent shard.

  There is a race condition where it's possible for a parent shard to appear open while having child shards. This check can now be disabled by setting [`ignoreUnexpectedChildShards`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1037) to true (see the sketch after this list).
  * [PR #240](https://github.com/awslabs/amazon-kinesis-client/pull/240)
  * [Issue #210](https://github.com/awslabs/amazon-kinesis-client/issues/210)
* Upgraded the AWS SDK for Java to 1.11.261
  * [PR #281](https://github.com/awslabs/amazon-kinesis-client/pull/281)
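A minimal sketch of disabling that check, assuming a builder-style `withIgnoreUnexpectedChildShards` setter on `KinesisClientLibConfiguration` (the application, stream, and worker names here are hypothetical):

```java
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;

final class ChildShardCheckConfig {
    static KinesisClientLibConfiguration build() {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                // Skip the open-parent-with-child-shards consistency check described above.
                .withIgnoreUnexpectedChildShards(true);
    }
}
```
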
## Release 1.8.8
* Fixed issues with lease losses due to `ExpiredIteratorException` in `PrefetchGetRecordsCache` and `AsynchronousFetchingStrategy`.

  PrefetchGetRecordsCache will request a new iterator and start fetching data again.
  * [PR#263](https://github.com/awslabs/amazon-kinesis-client/pull/263)
* Added warning message for long running tasks.

  Logging long running tasks can be enabled by setting the following configuration property:

  | Name | Default | Description |
  | ---- | ------- | ----------- |
  | [`logWarningForTaskAfterMillis`](https://github.com/awslabs/amazon-kinesis-client/blob/3de901ea9327370ed732af86c4d4999c8d99541c/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1367) | Not set | Milliseconds after which the logger will log a warning message for the long running task |

  * [PR#259](https://github.com/awslabs/amazon-kinesis-client/pull/259)
* Handling spurious lease renewal failures gracefully.

  Added better handling of DynamoDB failures when updating leases. These failures would occur when a request to DynamoDB appeared to fail, but was actually successful.
  * [PR#247](https://github.com/awslabs/amazon-kinesis-client/pull/247)
* ShutdownTask gets retried if the previous attempt on the ShutdownTask fails.
  * [PR#267](https://github.com/awslabs/amazon-kinesis-client/pull/267)
* Fix for using maxRecords from `KinesisClientLibConfiguration` in `GetRecordsCache` for fetching records.
  * [PR#264](https://github.com/awslabs/amazon-kinesis-client/pull/264)

## Release 1.8.7
* Don't add a delay for synchronous requests to Kinesis

  Removes a delay that had been added for synchronous `GetRecords` calls to Kinesis.
  * [PR #256](https://github.com/awslabs/amazon-kinesis-client/pull/256)

## Release 1.8.6
* Add prefetching of records from Kinesis

  Prefetching will retrieve and queue additional records from Kinesis while the application is processing existing records.

  Prefetching can be enabled by setting [`dataFetchingStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1317) to `PREFETCH_CACHED`. Once enabled, an additional fetching thread will be started to retrieve records from Kinesis. Retrieved records will be held in a queue until the application is ready to process them.

  Prefetching supports the following configuration values (see the sketch after this list):

  | Name | Default | Description |
  | ---- | ------- | ----------- |
  | [`dataFetchingStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1317) | `DEFAULT` | Which data fetching strategy to use |
  | [`maxPendingProcessRecordsInput`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1296) | 3 | The maximum number of process records input that can be queued |
  | [`maxCacheByteSize`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1307) | 8 MiB | The maximum number of bytes that can be queued |
  | [`maxRecordsCount`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1326) | 30,000 | The maximum number of records that can be queued |
  | [`idleMillisBetweenCalls`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1353) | 1,500 ms | The amount of time to wait between calls to Kinesis |

  * [PR #246](https://github.com/awslabs/amazon-kinesis-client/pull/246)
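A hedged sketch of wiring these values together, assuming builder-style setters on `KinesisClientLibConfiguration` that mirror the property names in the table above (setter names and the application/stream/worker identifiers are assumptions; verify against your KCL 1.x version):

```java
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;

final class PrefetchConfig {
    static KinesisClientLibConfiguration build() {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                .withDataFetchingStrategy("PREFETCH_CACHED") // start the background fetching thread
                .withMaxPendingProcessRecordsInput(3)        // cap on queued ProcessRecordsInput batches
                .withMaxCacheByteSize(8 * 1024 * 1024)       // 8 MiB queue budget
                .withMaxRecordsCount(30_000)                 // cap on queued records
                .withIdleMillisBetweenCalls(1500L);          // pause between GetRecords calls
    }
}
```

The values shown are simply the defaults from the table; in practice the queue caps would be tuned to record size and processing latency.
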
## Release 1.8.5 (September 26, 2017)
* Only advance the shard iterator for the accepted response.

  This fixes a race condition in the `KinesisDataFetcher` when it's being used to make asynchronous requests. The shard iterator is now only advanced when the retriever calls `DataFetcherResult#accept()`.
  * [PR #230](https://github.com/awslabs/amazon-kinesis-client/pull/230)
  * [Issue #231](https://github.com/awslabs/amazon-kinesis-client/issues/231)

## Release 1.8.4 (September 22, 2017)
* Create a new completion service for each request.

  This ensures that canceled tasks are discarded, preventing a cancellation exception from causing issues when processing records.
  * [PR #227](https://github.com/awslabs/amazon-kinesis-client/pull/227)
  * [Issue #226](https://github.com/awslabs/amazon-kinesis-client/issues/226)

## Release 1.8.3 (September 22, 2017)
* Call shutdown on the retriever when the record processor is being shut down

  This fixes a bug that could leak threads when the [`AsynchronousGetRecordsRetrievalStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/9a82b6bd05b3c9c5f8581af007141fa6d5f0fc4e/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/AsynchronousGetRecordsRetrievalStrategy.java#L42) is being used.

  The asynchronous retriever is only used when [`KinesisClientLibConfiguration#retryGetRecordsInSeconds`](https://github.com/awslabs/amazon-kinesis-client/blob/01d2688bc6e68fd3fe5cb698cb65299d67ac930d/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L227) and [`KinesisClientLibConfiguration#maxGetRecordsThreadPool`](https://github.com/awslabs/amazon-kinesis-client/blob/01d2688bc6e68fd3fe5cb698cb65299d67ac930d/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L230) are set.
  * [PR #222](https://github.com/awslabs/amazon-kinesis-client/pull/222)

## Release 1.8.2 (September 20, 2017)
* Add support for two phase checkpoints

  Applications can now set a pending checkpoint before completing the checkpoint operation. Once the application has completed its checkpoint steps, the final checkpoint will clear the pending checkpoint.

  Should the checkpoint fail, the attempted sequence number is provided in [`InitializationInput#getPendingCheckpointSequenceNumber`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/InitializationInput.java#L81); otherwise the value will be null. A sketch of the flow follows this list.
  * [PR #188](https://github.com/awslabs/amazon-kinesis-client/pull/188)
* Support timeouts and retry for GetRecords calls.

  Applications can now set timeouts for GetRecords calls to Kinesis. As part of setting the timeout, the application must also provide a thread pool size for concurrent requests.
  * [PR #214](https://github.com/awslabs/amazon-kinesis-client/pull/214)
* Notification when the lease table is throttled

  When writes or reads to the lease table are throttled, a warning will be emitted. If you're seeing this warning, you should increase the IOPS for your lease table to prevent processing delays.
  * [PR #212](https://github.com/awslabs/amazon-kinesis-client/pull/212)
* Support configuring the graceful shutdown timeout for MultiLang Clients

  This adds support for setting the timeout that the Java process will wait for the MultiLang client to complete graceful shutdown. The timeout can be configured by adding `shutdownGraceMillis` to the properties file, set to the number of milliseconds to wait.
  * [PR #204](https://github.com/awslabs/amazon-kinesis-client/pull/204)
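A minimal sketch of the two-phase flow inside a record processor, assuming `prepareCheckpoint()` returns a prepared-checkpointer handle whose own `checkpoint()` finalizes the operation (type and package names recalled for this release, not verified; exception handling elided):

```java
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IPreparedCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;

final class TwoPhaseCheckpoint {
    static void checkpointInTwoPhases(IRecordProcessorCheckpointer checkpointer) throws Exception {
        // Phase 1: record the pending checkpoint in the lease table.
        IPreparedCheckpointer prepared = checkpointer.prepareCheckpoint();
        // ... perform the application's own commit work here ...
        // Phase 2: finalize, which also clears the pending checkpoint.
        prepared.checkpoint();
    }
}
```

If a worker fails between the two phases, the attempted sequence number surfaces on restart via `InitializationInput#getPendingCheckpointSequenceNumber`, as noted above.
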
## Release 1.8.1 (August 2, 2017)
* Support timeouts for calls to the MultiLang Daemon

  This adds support for setting a timeout when dispatching records to the client record processor. If the record processor doesn't respond within the timeout, the parent Java process will be terminated. This is a temporary fix to handle cases where the KCL becomes blocked while waiting for a client record processor.

  The timeout can be set by adding `timeoutInSeconds = <timeout value>` to the properties file. The default is no timeout.

  __Setting this can cause the KCL to exit suddenly; before using it, ensure that you have an automated restart for your application.__
  * [PR #195](https://github.com/awslabs/amazon-kinesis-client/pull/195)
  * [Issue #185](https://github.com/awslabs/amazon-kinesis-client/issues/185)

## Release 1.8.0 (July 25, 2017)
* Execute graceful shutdown on its own thread
  * [PR #191](https://github.com/awslabs/amazon-kinesis-client/pull/191)
  * [Issue #167](https://github.com/awslabs/amazon-kinesis-client/issues/167)
* Added support for controlling the size of the lease renewer thread pool
  * [PR #177](https://github.com/awslabs/amazon-kinesis-client/pull/177)
  * [Issue #171](https://github.com/awslabs/amazon-kinesis-client/issues/171)
* Require Java 8 and later

  __Java 8 is now required for versions 1.8.0 of the amazon-kinesis-client and later.__
  * [PR #176](https://github.com/awslabs/amazon-kinesis-client/issues/176)

## Release 1.7.6 (June 21, 2017)
* Added support for graceful shutdown in MultiLang Clients
  * [PR #174](https://github.com/awslabs/amazon-kinesis-client/pull/174)
  * [PR #182](https://github.com/awslabs/amazon-kinesis-client/pull/182)
* Updated documentation for `v2.IRecordProcessor#shutdown`, and `KinesisClientLibConfiguration#idleTimeBetweenReadsMillis`
  * [PR #170](https://github.com/awslabs/amazon-kinesis-client/pull/170)
* Updated to version 1.11.151 of the AWS Java SDK
  * [PR #183](https://github.com/awslabs/amazon-kinesis-client/pull/183)

## Release 1.7.5 (April 7, 2017)
* Correctly handle throttling for DescribeStream, and save accumulated progress from individual calls.
  * [PR #152](https://github.com/awslabs/amazon-kinesis-client/pull/152)
* Upgrade to version 1.11.115 of the AWS Java SDK
  * [PR #155](https://github.com/awslabs/amazon-kinesis-client/pull/155)

## Release 1.7.4 (February 27, 2017)
* Fixed an issue building JavaDoc for Java 8.
  * [Issue #18](https://github.com/awslabs/amazon-kinesis-client/issues/18)
  * [PR #141](https://github.com/awslabs/amazon-kinesis-client/pull/141)
* Reduce Throttling Messages to WARN, unless throttling occurs 6 times consecutively.
  * [Issue #4](https://github.com/awslabs/amazon-kinesis-client/issues/4)
  * [PR #140](https://github.com/awslabs/amazon-kinesis-client/pull/140)
* Fixed two bugs occurring in requestShutdown.
  * Fixed a bug that prevented the worker from shutting down, via requestShutdown, when no leases were held.
    * [Issue #128](https://github.com/awslabs/amazon-kinesis-client/issues/128)
  * Fixed a bug that could trigger a NullPointerException if leases changed during requestShutdown.
    * [Issue #129](https://github.com/awslabs/amazon-kinesis-client/issues/129)
  * [PR #139](https://github.com/awslabs/amazon-kinesis-client/pull/139)
* Upgraded the AWS SDK Version to 1.11.91
  * [PR #138](https://github.com/awslabs/amazon-kinesis-client/pull/138)
* Use an executor returned from `Executors.newFixedThreadPool` instead of constructing it by hand.
  * [PR #135](https://github.com/awslabs/amazon-kinesis-client/pull/135)
* Correctly initialize DynamoDB client, when endpoint is explicitly set.
  * [PR #142](https://github.com/awslabs/amazon-kinesis-client/pull/142)

## Release 1.7.3 (January 9, 2017)
* Upgrade to the newest AWS Java SDK.
  * [Amazon Kinesis Client Python Issue #27](https://github.com/awslabs/amazon-kinesis-client-python/issues/27)
  * [PR #126](https://github.com/awslabs/amazon-kinesis-client/pull/126)
  * [PR #125](https://github.com/awslabs/amazon-kinesis-client/pull/125)
* Added a direct dependency on commons-logging.
  * [Issue #123](https://github.com/awslabs/amazon-kinesis-client/issues/123)
  * [PR #124](https://github.com/awslabs/amazon-kinesis-client/pull/124)
* Make ShardInfo public to allow for custom ShardPrioritization strategies.
  * [Issue #120](https://github.com/awslabs/amazon-kinesis-client/issues/120)
  * [PR #127](https://github.com/awslabs/amazon-kinesis-client/pull/127)

## Release 1.7.2 (November 7, 2016)
* MultiLangDaemon Feature Updates

  The MultiLangDaemon has been upgraded to use the v2 interfaces, which allows access to enhanced checkpointing, and more information during record processor initialization. The MultiLangDaemon clients must be updated before they can take advantage of these new features.

## Release 1.7.1 (November 3, 2016)
* General
  * Allow disabling shard synchronization at startup.
    * Applications can disable shard synchronization at startup. Disabling shard synchronization can reduce application startup times for very large streams.
    * [PR #102](https://github.com/awslabs/amazon-kinesis-client/pull/102)
  * Applications can now request a graceful shutdown, and record processors that implement IShutdownNotificationAware will be given a chance to checkpoint before being shut down (see the sketch after this list).
    * This adds a [new interface](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java), and a [new method on Worker](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java#L539).
    * [PR #109](https://github.com/awslabs/amazon-kinesis-client/pull/109)
    * Solves [Issue #79](https://github.com/awslabs/amazon-kinesis-client/issues/79)
* MultiLangDaemon
  * Applications can now use credential providers that accept string parameters.
    * [PR #99](https://github.com/awslabs/amazon-kinesis-client/pull/99)
  * Applications can now use different credentials for each service.
    * [PR #111](https://github.com/awslabs/amazon-kinesis-client/pull/111)
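A hedged sketch of opting in with the KCL 1.x v2 interfaces (type names taken from the links above; the callback body is illustrative):

```java
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;

class NotifiedProcessor implements IRecordProcessor, IShutdownNotificationAware {
    @Override public void initialize(InitializationInput input) { }
    @Override public void processRecords(ProcessRecordsInput input) { }
    @Override public void shutdown(ShutdownInput input) { }

    // Invoked when a graceful shutdown is requested on the Worker,
    // before the lease is released.
    @Override
    public void shutdownRequested(IRecordProcessorCheckpointer checkpointer) {
        try {
            checkpointer.checkpoint(); // last chance to record progress
        } catch (Exception e) {
            // log and continue shutting down
        }
    }
}
```
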
## Release 1.7.0 (August 22, 2016)
* Add support for time based iterators ([See GetShardIterator Documentation](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html))
  * [PR #94](https://github.com/awslabs/amazon-kinesis-client/pull/94)

  The `KinesisClientLibConfiguration` now supports providing an initial time stamp position (see the sketch after this list).
  * This position is only used if there is no current checkpoint for the shard.
  * This setting cannot be used with DynamoDB Streams.

  Resolves [Issue #88](https://github.com/awslabs/amazon-kinesis-client/issues/88)
* Allow Prioritization of Parent Shards for Task Assignment
  * [PR #95](https://github.com/awslabs/amazon-kinesis-client/pull/95)

  The `KinesisClientLibConfiguration` now supports providing a `ShardPrioritization` strategy. This strategy controls how the `Worker` determines which `ShardConsumer` to call next. This can improve processing for streams that split often, such as DynamoDB Streams.
* Remove direct dependency on `aws-java-sdk-core`, to allow independent versioning.
  * [PR #92](https://github.com/awslabs/amazon-kinesis-client/pull/92)

  **You may need to add a direct dependency on aws-java-sdk-core if other dependencies include an older version.**
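A hedged sketch of supplying that starting timestamp, assuming a `withTimestampAtInitialPositionInStream` setter on `KinesisClientLibConfiguration` (setter name and the application/stream/worker identifiers are assumptions):

```java
import java.util.Date;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;

final class TimestampStartConfig {
    static KinesisClientLibConfiguration build(Date startAt) {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                // Begin reading each shard at this point in time, but only
                // for shards that have no checkpoint yet (per the note above).
                .withTimestampAtInitialPositionInStream(startAt);
    }
}
```
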
## Release 1.6.5 (July 25, 2016)
* Change LeaseManager to call DescribeTable before attempting to create the lease table.
  * [Issue #36](https://github.com/awslabs/amazon-kinesis-client/issues/36)
  * [PR #41](https://github.com/awslabs/amazon-kinesis-client/pull/41)
  * [PR #67](https://github.com/awslabs/amazon-kinesis-client/pull/67)
* Allow DynamoDB lease table name to be specified
  * [PR #61](https://github.com/awslabs/amazon-kinesis-client/pull/61)
* Add approximateArrivalTimestamp for JsonFriendlyRecord
  * [PR #86](https://github.com/awslabs/amazon-kinesis-client/pull/86)
* Shutdown lease renewal thread pool on exit.
  * [PR #84](https://github.com/awslabs/amazon-kinesis-client/pull/84)
* Wait for CloudWatch publishing thread to finish before exiting.
  * [PR #82](https://github.com/awslabs/amazon-kinesis-client/pull/82)
* Added unit and integration tests for the library.

## Release 1.6.4 (July 6, 2016)
* Upgrade to AWS SDK for Java 1.11.14
  * [Issue #74](https://github.com/awslabs/amazon-kinesis-client/issues/74)
  * [Issue #73](https://github.com/awslabs/amazon-kinesis-client/issues/73)
* **Maven Artifact Signing Change**
  * Artifacts are now signed by the identity `Amazon Kinesis Tools <amazon-kinesis-tools@amazon.com>`

## Release 1.6.3 (May 12, 2016)
* Fix format exception caused by DEBUG log in LeaseTaker [Issue #68](https://github.com/awslabs/amazon-kinesis-client/issues/68)

## Release 1.6.2 (March 23, 2016)
* Support for specifying max leases per worker and max leases to steal at a time.
* Support for specifying initial DynamoDB table read and write capacity.
* Support for parallel lease renewal.
* Support for graceful worker shutdown.
* Change DefaultCWMetricsPublisher log level to debug. [PR #49](https://github.com/awslabs/amazon-kinesis-client/pull/49)
* Avoid NPE in MLD record processor shutdown if record processor was not initialized. [Issue #29](https://github.com/awslabs/amazon-kinesis-client/issues/29)

## Release 1.6.1 (September 23, 2015)
* Expose [approximateArrivalTimestamp](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) for Records in processRecords API call.

## Release 1.6.0 (July 31, 2015)
* Restores compatibility with [dynamodb-streams-kinesis-adapter](https://github.com/awslabs/dynamodb-streams-kinesis-adapter) (which was broken in 1.4.0).

## Release 1.5.1 (July 20, 2015)
* KCL maven artifact 1.5.0 does not work with JDK 7. This release addresses this issue.

## Release 1.5.0 (July 9, 2015)
* **[Metrics Enhancements][kinesis-guide-monitoring-with-kcl]**
  * Support metrics level and dimension configurations to control CloudWatch metrics emitted by the KCL.
  * Add new metrics that track time spent in record processor methods.
  * Disable WorkerIdentifier dimension by default.
* **Exception Reporting** — Do not silently ignore exceptions in ShardConsumer.
* **AWS SDK Component Dependencies** — Depend only on AWS SDK components that are used.

## Release 1.4.0 (June 2, 2015)
* Integration with the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**
  * Automatically de-aggregate records put into the Kinesis stream using the KPL.
  * Support checkpointing at the individual user record level when multiple user records are aggregated into one Kinesis record using the KPL.

See [Consumer De-aggregation with the KCL][kinesis-guide-consumer-deaggregation] for details.

## Release 1.3.0 (May 22, 2015)
* A new metric called "MillisBehindLatest", which tracks how far consumers are from real time, is now uploaded to CloudWatch.

## Release 1.2.1 (January 26, 2015)
* **MultiLangDaemon** — Changes to the MultiLangDaemon to make it easier to provide a custom worker.

## Release 1.2 (October 21, 2014)
* **Multi-Language Support** — Amazon KCL now supports implementing record processors in any language by communicating with the daemon over [STDIN and STDOUT][multi-lang-protocol]. Python developers can directly use the [Amazon Kinesis Client Library for Python][kclpy] to write their data processing applications.

## Release 1.1 (June 30, 2014)
* **Checkpointing at a specific sequence number** — The IRecordProcessorCheckpointer interface now supports checkpointing at a sequence number specified by the record processor.
* **Set region** — KinesisClientLibConfiguration now supports setting the region name to indicate the location of the Amazon Kinesis service. The Amazon DynamoDB table and Amazon CloudWatch metrics associated with your application will also use this region setting.

[kinesis]: http://aws.amazon.com/kinesis
[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169
[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues
172 README.md

@@ -32,9 +32,17 @@ Please open an issue if you have any questions.
## Building from Source

After you've downloaded the code from GitHub, you can build it using Maven. To disable GPG signing in the build, use
this command: `mvn clean install -Dgpg.skip=true`. Note: This command runs integration tests, which in turn creates AWS
resources (which require manual cleanup). Integration tests require valid AWS credentials, which need to be discovered at
runtime. To skip running integration tests, add the ` -DskipITs` option to the build command.
this command: `mvn clean install -Dgpg.skip=true`.
Note: This command does not run integration tests.

## Running Integration Tests

Note that running integration tests creates AWS resources.
Integration tests require valid AWS credentials.
This will look for a default AWS profile specified in your local `.aws/credentials`.
To run all integration tests: `mvn verify -DskipITs=false`.
To run one integration test: `mvn -Dit.test=*IntegrationTest -DskipITs=false verify`
Optionally, you can provide the name of an IAM user/role to run tests with as a string using this command: `mvn verify -DskipITs=false -DawsProfile="<PROFILE_NAME>"`.

## Integration with the Kinesis Producer Library

For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**, the KCL integrates without additional effort. When the KCL retrieves an aggregated Amazon Kinesis record consisting of multiple KPL user records, it will automatically invoke the KPL to extract the individual user records before returning them to the user.
@@ -50,7 +58,7 @@ The recommended way to use the KCL for Java is to consume it from Maven.
<dependency>
    <groupId>software.amazon.kinesis</groupId>
    <artifactId>amazon-kinesis-client</artifactId>
    <version>2.4.3</version>
    <version>2.5.1</version>
</dependency>
```
@@ -66,157 +74,10 @@ The recommended way to use the KCL for Java is to consume it from Maven.

## Release Notes

### Release 2.4.3 (September 6, 2022)
* [#980](https://github.com/awslabs/amazon-kinesis-client/pull/980) logback-classic: 1.2.9 -> 1.4.0
* [#983](https://github.com/awslabs/amazon-kinesis-client/pull/983)
  * protobuf-java: 3.19.2 -> 3.21.5
  * slf4j.version: 1.7.32 -> 2.0.0
  * schema-registry-serde: 1.1.9 -> 1.1.13
* [#984](https://github.com/awslabs/amazon-kinesis-client/pull/984) awssdk.version from 2.17.108 to 2.17.267
* [#987](https://github.com/awslabs/amazon-kinesis-client/pull/987) guava: 31.0.1-jre -> 31.1-jre
* [#988](https://github.com/awslabs/amazon-kinesis-client/pull/988) jcommander: 1.81 to 1.82
* [#990](https://github.com/awslabs/amazon-kinesis-client/pull/990) Upgrade dependencies
  * aws-java-sdk.version: 1.12.130 -> 1.12.296
  * lombok: 1.18.22 -> 1.18.24
  * rxjava: 3.1.3 -> 3.1.5
  * maven-resources-plugin: 2.6 -> 3.3.0
  * logback-classic: 1.4.0 -> 1.3.0
  * awssdk.version: 2.17.267 -> 2.17.268

### Release 2.4.2 (August 10, 2022)
* [#972](https://github.com/awslabs/amazon-kinesis-client/pull/972) Upgrade Lombok to version 1.18.24

### Latest Release 2.4.1 (March 24, 2022)
[Milestone#68](https://github.com/awslabs/amazon-kinesis-client/milestone/68)
* [#916](https://github.com/awslabs/amazon-kinesis-client/pull/916) Upgrade to rxjava3

### Release 2.4.0 (March 2, 2022)
[Milestone#67](https://github.com/awslabs/amazon-kinesis-client/milestone/67)
* [#894](https://github.com/awslabs/amazon-kinesis-client/pull/894) Bump protobuf-java from 3.19.1 to 3.19.2
* [#924](https://github.com/awslabs/amazon-kinesis-client/pull/924) Support Protobuf Data format with Glue Schema Registry.

### Latest Release 2.3.10 (January 4, 2022)
[Milestone#66](https://github.com/awslabs/amazon-kinesis-client/milestone/66)
* [#868](https://github.com/awslabs/amazon-kinesis-client/pull/868) Adding a new metric: Application-level MillisBehindLatest
* [#879](https://github.com/awslabs/amazon-kinesis-client/pull/879) Keep dependencies up-to-date
* [#886](https://github.com/awslabs/amazon-kinesis-client/pull/886) Get latest counter before attempting a take to ensure take succeeds
* [#888](https://github.com/awslabs/amazon-kinesis-client/pull/888) Configure dependabot for v1.x branch

### Latest Release 2.3.9 (November 22, 2021)
[Milestone#65](https://github.com/awslabs/amazon-kinesis-client/milestone/65)
* [#866](https://github.com/awslabs/amazon-kinesis-client/pull/866) Update logback dependency.

### Release 2.3.8 (October 27, 2021)
[Milestone#64](https://github.com/awslabs/amazon-kinesis-client/milestone/64)
* [#860](https://github.com/awslabs/amazon-kinesis-client/pull/860) Upgrade Glue schema registry from 1.1.4 to 1.1.5.
* [#861](https://github.com/awslabs/amazon-kinesis-client/pull/861) Revert [PR#847](https://github.com/awslabs/amazon-kinesis-client/pull/847) and added new tests.

### Release 2.3.7 (October 11, 2021)
[Milestone#63](https://github.com/awslabs/amazon-kinesis-client/milestone/63)
* [#842](https://github.com/awslabs/amazon-kinesis-client/pull/842) Fixing typo in debug logs.
* [#846](https://github.com/awslabs/amazon-kinesis-client/pull/846) Fix DynamoDBLeaseTaker logging of available leases
* [#847](https://github.com/awslabs/amazon-kinesis-client/pull/847) Make use of Java 8 to simplify computeLeaseCounts()
* [#853](https://github.com/awslabs/amazon-kinesis-client/pull/853) Add configurable initial position for orphaned stream
* [#854](https://github.com/awslabs/amazon-kinesis-client/pull/854) Create DynamoDB tables with On-Demand billing mode by default.
* [#855](https://github.com/awslabs/amazon-kinesis-client/pull/855) Emit Glue Schema Registry usage metrics
* [#857](https://github.com/awslabs/amazon-kinesis-client/pull/857) Fix to shut down PrefetchRecordsPublisher in a graceful manner
* [#858](https://github.com/awslabs/amazon-kinesis-client/pull/858) Upgrade AWS SDK version to 2.17.52.

### Release 2.3.6 (July 9, 2021)
[Milestone#62](https://github.com/awslabs/amazon-kinesis-client/milestone/62)
* [#836](https://github.com/awslabs/amazon-kinesis-client/pull/836) Upgraded AWS SDK version to 2.16.98
* [#835](https://github.com/awslabs/amazon-kinesis-client/pull/835) Upgraded Glue Schema Registry version to 1.1.1
* [#828](https://github.com/awslabs/amazon-kinesis-client/pull/828) Modified wildcard imports to individual imports.
* [#817](https://github.com/awslabs/amazon-kinesis-client/pull/817) Updated the Worker shutdown logic to make sure that the `LeaseCleanupManager` also terminates all the threads that it has started.
* [#794](https://github.com/awslabs/amazon-kinesis-client/pull/794) Silence warning when there are no stale streams to delete.

### Release 2.3.5 (June 14, 2021)
[Milestone#59](https://github.com/awslabs/amazon-kinesis-client/milestone/59)
* [#824](https://github.com/awslabs/amazon-kinesis-client/pull/824) Upgraded dependencies
  * logback-classic version to 1.2.3
  * AWS Java SDK version to 1.12.3
  * AWS SDK version to 2.16.81
* [#815](https://github.com/awslabs/amazon-kinesis-client/pull/815) Converted Future to CompletableFuture, which helps in proper conversion to Scala using Scala Future Converters.
* [#810](https://github.com/awslabs/amazon-kinesis-client/pull/810) Bump commons-io from 2.6 to 2.7
* [#804](https://github.com/awslabs/amazon-kinesis-client/pull/804) Allowing the user to specify an initial timestamp from which the daemon will process records.
* [#802](https://github.com/awslabs/amazon-kinesis-client/pull/802) Upgraded guava from 26.0-jre to 29.0-jre
* [#801](https://github.com/awslabs/amazon-kinesis-client/pull/801) Fixing a bug that caused indefinite blocking when trying to unlock a lock that isn't locked.
* [#762](https://github.com/awslabs/amazon-kinesis-client/pull/762) Added support for web identity token in multilang

### Release 2.3.4 (February 19, 2021)
[Milestone#56](https://github.com/awslabs/amazon-kinesis-client/milestone/56)
* [#788](https://github.com/awslabs/amazon-kinesis-client/pull/788) Fixing a bug that caused paginated `ListShards` calls with the `ShardFilter` parameter to fail when the lease table was being initialized.

### Release 2.3.3 (December 23, 2020)
[Milestone#55](https://github.com/awslabs/amazon-kinesis-client/milestone/55)
* Fixing a bug in PrefetchRecordsPublisher which was causing retry storms if the initial request fails.
* Fixing a bug where the idleTimeBetweenReadsInMillis property was ignored in PollingConfig.

### Release 2.3.2 (November 19, 2020)
[Milestone#54](https://github.com/awslabs/amazon-kinesis-client/milestone/54)
* Adding support for Glue Schema Registry. Deserialize and read schemas associated with the records.
* Updating AWS SDK version to 2.15.31

### Release 2.3.1 (October 20, 2020)
[Milestone#53](https://github.com/awslabs/amazon-kinesis-client/milestone/53)
* Introducing support for processing multiple Kinesis data streams with the same KCL 2.x Java consumer application
  * To build a consumer application that can process multiple streams at the same time, you must implement a new
    interface called [MultiStreamTracker](https://github.com/awslabs/amazon-kinesis-client/blob/0c5042dadf794fe988438436252a5a8fe70b6b0b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java) (see the sketch after this list).
  * MultiStreamTracker will also publish various metrics around the currently active streams being processed, and the number
    of streams that are deleted in this time period or are pending deletion.
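A hedged sketch of implementing that interface as it appears around KCL 2.3.x; the method names, the `StreamConfig` constructor shape, and the lease-deletion strategy class are recalled rather than verified, and the account ID, stream names, and creation epochs are hypothetical:

```java
import java.util.Arrays;
import java.util.List;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamConfig;
import software.amazon.kinesis.common.StreamIdentifier;
import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy;
import software.amazon.kinesis.processor.MultiStreamTracker;

class TwoStreamTracker implements MultiStreamTracker {
    @Override
    public List<StreamConfig> streamConfigList() {
        // Multi-stream identifiers use the serialized form "accountId:streamName:creationEpoch".
        return Arrays.asList(
                new StreamConfig(
                        StreamIdentifier.multiStreamInstance("123456789012:orders:1600000000"),
                        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON)),
                new StreamConfig(
                        StreamIdentifier.multiStreamInstance("123456789012:clicks:1600000000"),
                        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)));
    }

    @Override
    public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() {
        // Keep leases of streams removed from the list; other strategies defer or automate deletion.
        return new FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy();
    }
}
```
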
### Release 2.3.0 (August 17, 2020)
* [Milestone#52](https://github.com/awslabs/amazon-kinesis-client/milestones/52)

* Behavior of shard synchronization is moving from each worker independently learning about all existing shards to workers only discovering the children of shards that each worker owns. This optimizes memory usage, lease table IOPS usage, and the number of calls made to Kinesis for streams with high shard counts and/or frequent resharding.
* When bootstrapping an empty lease table, KCL utilizes the `ListShards` API's filtering option (the optional `ShardFilter` request parameter) to retrieve and create leases only for a snapshot of shards open at the time specified by the `ShardFilter` parameter. The `ShardFilter` parameter enables you to filter the response of the `ListShards` API using the `Type` parameter. KCL uses the `Type` filter parameter and the following of its valid values to identify and return a snapshot of open shards that might require new leases.
  * Currently, the following shard filters are supported:
    * `AT_TRIM_HORIZON` - the response includes all the shards that were open at `TRIM_HORIZON`.
    * `AT_LATEST` - the response includes only the currently open shards of the data stream.
    * `AT_TIMESTAMP` - the response includes all shards whose start timestamp is less than or equal to the given timestamp and end timestamp is greater than or equal to the given timestamp or still open.
  * `ShardFilter` is used when creating leases for an empty lease table to initialize leases for a snapshot of shards specified at `RetrievalConfig#initialPositionInStreamExtended`.
  * For more information about ShardFilter, see the [official AWS documentation on ShardFilter](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ShardFilter.html).

* Introducing support for the `ChildShards` response of the `GetRecords` and the `SubscribeToShard` APIs to perform lease/shard synchronization that happens at `SHARD_END` for closed shards, allowing a KCL worker to only create leases for the child shards of the shard it finished processing.
  * For shared throughput consumer applications, this uses the `ChildShards` response of the `GetRecords` API. For dedicated throughput (enhanced fan-out) consumer applications, this uses the `ChildShards` response of the `SubscribeToShard` API.
  * For more information, see the official AWS Documentation on [GetRecords](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html), [SubscribeToShard](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SubscribeToShard.html), and [ChildShard](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ChildShard.html).

* KCL now also performs additional periodic shard/lease scans in order to identify any potential holes in the lease table, to ensure the complete hash range of the stream is being processed, and to create leases for them if required. `PeriodicShardSyncManager` is the new component that is responsible for running periodic lease/shard scans.
  * New configuration options are available to configure `PeriodicShardSyncManager` in `LeaseManagementConfig`:

  | Name | Default | Description |
  | --- | --- | --- |
  | leasesRecoveryAuditorExecutionFrequencyMillis | 120000 (2 minutes) | Frequency (in millis) of the auditor job to scan for partial leases in the lease table. If the auditor detects any hole in the leases for a stream, then it would trigger a shard sync based on leasesRecoveryAuditorInconsistencyConfidenceThreshold. |
  | leasesRecoveryAuditorInconsistencyConfidenceThreshold | 3 | Confidence threshold for the periodic auditor job to determine if leases for a stream in the lease table are inconsistent. If the auditor finds the same set of inconsistencies consecutively for a stream this many times, then it would trigger a shard sync. |

  * New CloudWatch metrics are also now emitted to monitor the health of `PeriodicShardSyncManager`:

  | Name | Description |
  | --- | --- |
  | NumStreamsWithPartialLeases | Number of streams that had holes in their hash ranges. |
  | NumStreamsToSync | Number of streams which underwent a full shard sync. |

* Introducing deferred lease cleanup. Leases will be deleted asynchronously by `LeaseCleanupManager` upon reaching `SHARD_END`, when a shard has either expired past the stream's retention period or been closed as the result of a resharding operation.
  * New configuration options are available to configure `LeaseCleanupManager`:

  | Name | Default | Description |
  | --- | --- | --- |
  | leaseCleanupIntervalMillis | 1 minute | Interval at which to run the lease cleanup thread. |
  | completedLeaseCleanupIntervalMillis | 5 minutes | Interval at which to check whether a lease is completed or not. |
  | garbageLeaseCleanupIntervalMillis | 30 minutes | Interval at which to check whether a lease is garbage (i.e., trimmed past the stream's retention period) or not. |

* Introducing _experimental_ support for multistreaming, allowing a single KCL application to multiplex processing multiple streams.
  * New configuration options are available to enable multistreaming in `RetrievalConfig#appStreamTracker`.

* Fixing a bug in `PrefetchRecordsPublisher` restarting while it was already running.
* Including an optimization to `HierarchicalShardSyncer` to only create leases for one layer of shards.
* Adding support to prepare and commit lease checkpoints with arbitrary bytes (see the sketch after this list).
  * This allows checkpointing of an arbitrary byte buffer up to the maximum permitted DynamoDB item size ([currently 400 KB as of release](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)), and can be used for recovery by passing a serialized byte buffer to `RecordProcessorCheckpointer#prepareCheckpoint` and `RecordProcessorCheckpointer#checkpoint`.
* Upgrading version of AWS SDK to 2.14.0.
* [#725](https://github.com/awslabs/amazon-kinesis-client/pull/725) Allowing KCL to consider lease tables in `UPDATING` status healthy.
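A hedged sketch of the arbitrary-bytes checkpoint mentioned above, assuming `byte[]` overloads on `RecordProcessorCheckpointer` matching the method names in that bullet and a `PreparedCheckpointer` return type (signatures unverified; the JSON payload is a placeholder and exception handling is elided):

```java
import java.nio.charset.StandardCharsets;
import software.amazon.kinesis.processor.PreparedCheckpointer;
import software.amazon.kinesis.processor.RecordProcessorCheckpointer;

final class StatefulCheckpoint {
    static void checkpointWithState(RecordProcessorCheckpointer checkpointer) throws Exception {
        // Any serialized application state up to the DynamoDB item-size limit (~400 KB).
        byte[] appState = "{\"lastAggregateId\":42}".getBytes(StandardCharsets.UTF_8);
        // Stage the state alongside the pending checkpoint.
        PreparedCheckpointer prepared = checkpointer.prepareCheckpoint(appState);
        // ... complete the application's own commit work ...
        prepared.checkpoint(); // finalize; the bytes are readable on recovery
    }
}
```
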
### For remaining release notes check **[CHANGELOG.md][changelog-md]**.

| KCL Version | Changelog |
| --- | --- |
| 2.x | [master/CHANGELOG.md](CHANGELOG.md) |
| 1.x | [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md) |

[kinesis]: http://aws.amazon.com/kinesis
[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169

@@ -231,5 +92,4 @@ The recommended way to use the KCL for Java is to consume it from Maven.
[kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html
[kclpy]: https://github.com/awslabs/amazon-kinesis-client-python
[multi-lang-protocol]: https://github.com/awslabs/amazon-kinesis-client/blob/master/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java
[changelog-md]: https://github.com/awslabs/amazon-kinesis-client/blob/master/CHANGELOG.md
[migration-guide]: https://docs.aws.amazon.com/streams/latest/dev/kcl-migration.html

@@ -21,14 +21,14 @@
<parent>
    <artifactId>amazon-kinesis-client-pom</artifactId>
    <groupId>software.amazon.kinesis</groupId>
    <version>2.4.3</version>
    <version>2.5.3-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>

<artifactId>amazon-kinesis-client-multilang</artifactId>

<properties>
    <aws-java-sdk.version>1.12.296</aws-java-sdk.version>
    <aws-java-sdk.version>1.12.405</aws-java-sdk.version>
</properties>

<dependencies>
@@ -130,7 +130,7 @@
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
-               <version>3.8.1</version>
+               <version>3.11.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
@@ -143,7 +143,7 @@
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
-               <version>3.3.1</version>
+               <version>3.5.0</version>
                <executions>
                    <execution>
                        <id>attach-javadocs</id>
@@ -45,11 +45,11 @@ public class MultiLangDaemonConfig {
    private static final String PROP_PROCESSING_LANGUAGE = "processingLanguage";
    private static final String PROP_MAX_ACTIVE_THREADS = "maxActiveThreads";

-   private MultiLangDaemonConfiguration multiLangDaemonConfiguration;
+   private final MultiLangDaemonConfiguration multiLangDaemonConfiguration;

-   private ExecutorService executorService;
+   private final ExecutorService executorService;

-   private MultiLangRecordProcessorFactory recordProcessorFactory;
+   private final MultiLangRecordProcessorFactory recordProcessorFactory;

    /**
     * Constructor.
@@ -165,7 +165,6 @@ public class MultiLangDaemonConfig {
                propertyStream.close();
            }
        }
-
    }

    private static boolean validateProperties(Properties properties) {
@@ -182,12 +181,12 @@ public class MultiLangDaemonConfig {
        log.debug("Value for {} property is {}", PROP_MAX_ACTIVE_THREADS, maxActiveThreads);
        if (maxActiveThreads <= 0) {
            log.info("Using a cached thread pool.");
-           return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
+           return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(),
                    builder.build());
        } else {
            log.info("Using a fixed thread pool with {} max active threads.", maxActiveThreads);
            return new ThreadPoolExecutor(maxActiveThreads, maxActiveThreads, 0L, TimeUnit.MILLISECONDS,
-                   new LinkedBlockingQueue<Runnable>(), builder.build());
+                   new LinkedBlockingQueue<>(), builder.build());
        }
    }
@@ -19,7 +19,6 @@ import java.util.concurrent.ExecutorService;
import com.fasterxml.jackson.databind.ObjectMapper;

import lombok.extern.slf4j.Slf4j;
-import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.processor.ShardRecordProcessor;
@@ -32,7 +32,6 @@ import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;
import software.amazon.kinesis.processor.ShardRecordProcessor;

-
/**
 * A record processor that manages creating a child process that implements the multi language protocol and connecting
 * that child process's input and outputs to a {@link MultiLangProtocol} object and calling the appropriate methods on
@@ -50,20 +49,20 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor {

    private Future<?> stderrReadTask;

-   private MessageWriter messageWriter;
-   private MessageReader messageReader;
-   private DrainChildSTDERRTask readSTDERRTask;
+   private final MessageWriter messageWriter;
+   private final MessageReader messageReader;
+   private final DrainChildSTDERRTask readSTDERRTask;

-   private ProcessBuilder processBuilder;
+   private final ProcessBuilder processBuilder;
    private Process process;
-   private ExecutorService executorService;
+   private final ExecutorService executorService;
    private ProcessState state;

-   private ObjectMapper objectMapper;
+   private final ObjectMapper objectMapper;

    private MultiLangProtocol protocol;

-   private MultiLangDaemonConfiguration configuration;
+   private final MultiLangDaemonConfiguration configuration;

    @Override
    public void initialize(InitializationInput initializationInput) {
@@ -213,7 +212,6 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor {
        this.readSTDERRTask = readSTDERRTask;
        this.configuration = configuration;

-
        this.state = ProcessState.ACTIVE;
    }
@@ -303,8 +301,6 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor {

    /**
     * We provide a package level method for unit testing this call to exit.
-    *
-    * @param val exit value
     */
    void exit() {
        System.exit(EXIT_VALUE);
@@ -0,0 +1,145 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import java.util.HashMap;
import java.util.Map;

import com.amazonaws.regions.Regions;
import com.google.common.base.CaseFormat;

import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

/**
 * Key-Value pairs which may be nested in, and extracted from, a property value
 * in a Java properties file. For example, given the line in a property file of
 * {@code my_key = my_value|foo=bar} and a delimiter split on {@code |} (pipe),
 * the value {@code my_value|foo=bar} would have a nested key of {@code foo}
 * and its corresponding value is {@code bar}.
 * <br/><br/>
 * The order of nested properties does not matter, and these properties are optional.
 * Customers may choose to provide, in any order, zero-or-more nested properties.
 * <br/><br/>
 * Duplicate keys are not supported, and may result in a last-write-wins outcome.
 */
@Slf4j
public enum NestedPropertyKey {

    /**
     * Specify the service endpoint where requests will be submitted.
     * This property's value must be in the following format:
     * <pre>
     *     ENDPOINT ::= SERVICE_ENDPOINT "^" SIGNING_REGION
     *     SERVICE_ENDPOINT ::= URL
     *     SIGNING_REGION ::= AWS_REGION
     * </pre>
     *
     * It would be redundant to provide both this and {@link #ENDPOINT_REGION}.
     *
     * @see #ENDPOINT_REGION
     * @see <a href="https://docs.aws.amazon.com/general/latest/gr/rande.html">AWS Service endpoints</a>
     * @see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions">Available Regions</a>
     */
    ENDPOINT {
        void visit(final NestedPropertyProcessor processor, final String endpoint) {
            final String[] tokens = endpoint.split("\\^");
            if (tokens.length != 2) {
                throw new IllegalArgumentException("Invalid " + name() + ": " + endpoint);
            }
            processor.acceptEndpoint(tokens[0], tokens[1]);
        }
    },

    /**
     * Specify the region where service requests will be submitted. This
     * region will determine both the service endpoint and signing region.
     * <br/><br/>
     * It would be redundant to provide both this and {@link #ENDPOINT}.
     *
     * @see #ENDPOINT
     * @see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions">Available Regions</a>
     */
    ENDPOINT_REGION {
        void visit(final NestedPropertyProcessor processor, final String region) {
            processor.acceptEndpointRegion(Regions.fromName(region));
        }
    },

    /**
     * External ids may be used when delegating access in a multi-tenant
     * environment, or to third parties.
     *
     * @see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html">
     *     How to use an external ID when granting access to your AWS resources to a third party</a>
     */
    EXTERNAL_ID {
        void visit(final NestedPropertyProcessor processor, final String externalId) {
            processor.acceptExternalId(externalId);
        }
    },

    ;

    /**
     * Nested key within the property value. For example, a nested key-value
     * of {@code foo=bar} has a nested key of {@code foo}.
     */
    @Getter(AccessLevel.PACKAGE)
    private final String nestedKey;

    NestedPropertyKey() {
        // convert the enum from UPPER_SNAKE_CASE to lowerCamelCase
        nestedKey = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name());
    }

    abstract void visit(NestedPropertyProcessor processor, String value);

    /**
     * Parses any number of parameters. Each nested property will prompt a
     * visit to the {@code processor}.
     *
     * @param processor processor to be invoked for every nested property
     * @param params parameters to check for a nested property key
     */
    public static void parse(final NestedPropertyProcessor processor, final String... params) {
        // Construct a disposable cache to keep this O(n). Since parsing is
        // usually one-and-done, it's wasteful to maintain this cache in perpetuity.
        final Map<String, NestedPropertyKey> cachedKeys = new HashMap<>();
        for (final NestedPropertyKey npk : values()) {
            cachedKeys.put(npk.getNestedKey(), npk);
        }

        for (final String param : params) {
            if (param != null) {
                final String[] tokens = param.split("=");
                if (tokens.length == 2) {
                    final NestedPropertyKey npk = cachedKeys.get(tokens[0]);
                    if (npk != null) {
                        npk.visit(processor, tokens[1]);
                    } else {
                        log.warn("Unsupported nested key: {}", param);
                    }
                } else if (tokens.length > 2) {
                    log.warn("Malformed nested key: {}", param);
                } else {
                    log.info("Parameter is not a nested key: {}", param);
                }
            }
        }
    }

}
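To ground the javadoc above, a hypothetical properties line carrying two of the documented nested keys (all values are placeholders):

```properties
# Nested keys ride after the main value, pipe-delimited, in any order.
# "endpoint" uses the documented SERVICE_ENDPOINT^SIGNING_REGION format.
my_key = my_value|externalId=spartacus|endpoint=https://sts.us-east-1.amazonaws.com^us-east-1
```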
@@ -0,0 +1,54 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import com.amazonaws.regions.Regions;

/**
 * Defines methods to process {@link NestedPropertyKey}s.
 */
public interface NestedPropertyProcessor {

    /**
     * Set the service endpoint where requests are sent.
     *
     * @param serviceEndpoint the service endpoint either with or without the protocol
     *      (e.g., https://sns.us-west-1.amazonaws.com, sns.us-west-1.amazonaws.com)
     * @param signingRegion the region to use for SigV4 signing of requests (e.g. us-west-1)
     *
     * @see #acceptEndpointRegion(Regions)
     * @see <a href="https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/client/builder/AwsClientBuilder.EndpointConfiguration.html">
     *     AwsClientBuilder.EndpointConfiguration</a>
     */
    void acceptEndpoint(String serviceEndpoint, String signingRegion);

    /**
     * Set the service endpoint where requests are sent.
     *
     * @param region Region to be used by the client. This will be used to determine both the service endpoint
     *      (e.g., https://sns.us-west-1.amazonaws.com) and signing region (e.g., us-west-1) for requests.
     *
     * @see #acceptEndpoint(String, String)
     */
    void acceptEndpointRegion(Regions region);

    /**
     * Set the external id, an optional field to designate who can assume an IAM role.
     *
     * @param externalId external id used in the service call used to retrieve session credentials
     */
    void acceptExternalId(String externalId);

}
@@ -0,0 +1,89 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang.auth;

import java.util.Arrays;

import com.amazonaws.auth.AWSSessionCredentials;
import com.amazonaws.auth.AWSSessionCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.Builder;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;

import software.amazon.kinesis.multilang.NestedPropertyKey;
import software.amazon.kinesis.multilang.NestedPropertyProcessor;

/**
 * An {@link AWSSessionCredentialsProvider} that is backed by STSAssumeRole.
 */
public class KclSTSAssumeRoleSessionCredentialsProvider
        implements AWSSessionCredentialsProvider, NestedPropertyProcessor {

    private final Builder builder;

    private final STSAssumeRoleSessionCredentialsProvider provider;

    /**
     *
     * @param params vararg parameters which must include roleArn at index=0,
     *      and roleSessionName at index=1
     */
    public KclSTSAssumeRoleSessionCredentialsProvider(final String[] params) {
        this(params[0], params[1], Arrays.copyOfRange(params, 2, params.length));
    }

    public KclSTSAssumeRoleSessionCredentialsProvider(final String roleArn, final String roleSessionName,
            final String... params) {
        builder = new Builder(roleArn, roleSessionName);
        NestedPropertyKey.parse(this, params);
        provider = builder.build();
    }

    @Override
    public AWSSessionCredentials getCredentials() {
        return provider.getCredentials();
    }

    @Override
    public void refresh() {
        // do nothing
    }

    @Override
    public void acceptEndpoint(final String serviceEndpoint, final String signingRegion) {
        final EndpointConfiguration endpoint = new EndpointConfiguration(serviceEndpoint, signingRegion);
        final AWSSecurityTokenService stsClient = AWSSecurityTokenServiceClient.builder()
                .withEndpointConfiguration(endpoint)
                .build();
        builder.withStsClient(stsClient);
    }

    @Override
    public void acceptEndpointRegion(final Regions region) {
        final AWSSecurityTokenService stsClient = AWSSecurityTokenServiceClient.builder()
                .withRegion(region)
                .build();
        builder.withStsClient(stsClient);
    }

    @Override
    public void acceptExternalId(final String externalId) {
        builder.withExternalId(externalId);
    }

}
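Putting the new provider and nested keys together, a multilang properties file could assume a role like this (role ARN, session name, and ids are placeholders):

```properties
# Sketch: the short class name resolves via the software.amazon.kinesis.multilang.auth. prefix;
# roleArn and roleSessionName are positional, nested keys may follow in any order.
AWSCredentialsProvider = KclSTSAssumeRoleSessionCredentialsProvider|arn:aws:iam::123456789012:role/MyKclRole|kclSession|externalId=myExternalId|endpointRegion=us-east-1
```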
@@ -14,23 +14,23 @@
 */
package software.amazon.kinesis.multilang.config;

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;

/**
 * Get AWSCredentialsProvider property.
 */
@Slf4j
class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder<AWSCredentialsProvider> {
    private static final String AUTH_PREFIX = "com.amazonaws.auth.";
    private static final String LIST_DELIMITER = ",";
    private static final String ARG_DELIMITER = "|";
@@ -65,35 +65,59 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder
     */
    @Override
    public List<Class<AWSCredentialsProvider>> getSupportedTypes() {
-       return Arrays.asList(AWSCredentialsProvider.class);
+       return Collections.singletonList(AWSCredentialsProvider.class);
    }

-   /*
+   /**
     * Convert string list to a list of valid credentials providers.
     */
    private static List<AWSCredentialsProvider> getValidCredentialsProviders(List<String> providerNames) {
        List<AWSCredentialsProvider> credentialsProviders = new ArrayList<>();

        for (String providerName : providerNames) {
-           if (providerName.contains(ARG_DELIMITER)) {
-               String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER);
-               Class<?>[] argTypes = new Class<?>[nameAndArgs.length - 1];
-               Arrays.fill(argTypes, String.class);
-               try {
-                   Class<?> className = Class.forName(nameAndArgs[0]);
-                   Constructor<?> c = className.getConstructor(argTypes);
-                   credentialsProviders.add((AWSCredentialsProvider) c
-                           .newInstance(Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length)));
-               } catch (Exception e) {
-                   log.debug("Can't find any credentials provider matching {}.", providerName);
+           final String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER);
+           final Class<? extends AWSCredentialsProvider> clazz;
+           try {
+               final Class<?> c = Class.forName(nameAndArgs[0]);
+               if (!AWSCredentialsProvider.class.isAssignableFrom(c)) {
+                   continue;
                }
-           } else {
-               try {
-                   Class<?> className = Class.forName(providerName);
-                   credentialsProviders.add((AWSCredentialsProvider) className.newInstance());
-               } catch (Exception e) {
-                   log.debug("Can't find any credentials provider matching {}.", providerName);
+               clazz = (Class<? extends AWSCredentialsProvider>) c;
+           } catch (ClassNotFoundException cnfe) {
+               // Providers are a product of prefixed Strings to cover multiple
+               // namespaces (e.g., "Foo" -> { "some.auth.Foo", "kcl.auth.Foo" }).
+               // It's expected that many class names will not resolve.
+               continue;
            }
+           log.info("Attempting to construct {}", clazz);
+
+           AWSCredentialsProvider provider = null;
+           if (nameAndArgs.length > 1) {
+               final String[] varargs = Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length);
+
+               // attempt to invoke an explicit N-arg constructor of FooClass(String, String, ...)
+               provider = constructProvider(providerName, () -> {
+                   Class<?>[] argTypes = new Class<?>[nameAndArgs.length - 1];
+                   Arrays.fill(argTypes, String.class);
+                   return clazz.getConstructor(argTypes).newInstance(varargs);
+               });
+
+               if (provider == null) {
+                   // attempt to invoke a public varargs/array constructor of FooClass(String[])
+                   provider = constructProvider(providerName, () ->
+                           clazz.getConstructor(String[].class).newInstance((Object) varargs));
+               }
+           }
+
+           if (provider == null) {
+               // regardless of parameters, fallback to invoke a public no-arg constructor
+               provider = constructProvider(providerName, clazz::newInstance);
+           }
+
+           if (provider != null) {
+               credentialsProviders.add(provider);
+           }
        }
        return credentialsProviders;
    }
@@ -101,7 +125,7 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder
    private static List<String> getProviderNames(String property) {
        // assume list delimiter is ","
        String[] elements = property.split(LIST_DELIMITER);
-       List<String> result = new ArrayList<String>();
+       List<String> result = new ArrayList<>();
        for (int i = 0; i < elements.length; i++) {
            String string = elements[i].trim();
            if (!string.isEmpty()) {
@@ -112,20 +136,48 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder
        return result;
    }

-   private static List<String> getPossibleFullClassNames(String s) {
-       /*
-        * We take care of three cases :
-        *
-        * 1. Customer provides a short name of common providers in com.amazonaws.auth package i.e. any classes
-        * implementing the AWSCredentialsProvider interface:
-        * http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html
-        *
-        * 2. Customer provides a full name of common providers e.g. com.amazonaws.auth.ClasspathFileCredentialsProvider
-        *
-        * 3. Customer provides a custom credentials provider with full name of provider
-        */
-       return Arrays.asList(s, AUTH_PREFIX + s);
+   private static List<String> getPossibleFullClassNames(final String provider) {
+       return Stream.of(
+               // Customer provides a short name of common providers in com.amazonaws.auth package
+               // (e.g., any classes implementing the AWSCredentialsProvider interface)
+               // @see http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html
+               "com.amazonaws.auth.",
+
+               // Customer provides a short name of a provider offered by this multi-lang package
+               "software.amazon.kinesis.multilang.auth.",
+
+               // Customer provides a fully-qualified provider name, or a custom credentials provider
+               // (e.g., com.amazonaws.auth.ClasspathFileCredentialsProvider, org.mycompany.FooProvider)
+               ""
+       ).map(prefix -> prefix + provider).collect(Collectors.toList());
    }

+   @FunctionalInterface
+   private interface CredentialsProviderConstructor<T extends AWSCredentialsProvider> {
+       T construct() throws IllegalAccessException, InstantiationException,
+               InvocationTargetException, NoSuchMethodException;
+   }
+
+   /**
+    * Attempts to construct an {@link AWSCredentialsProvider}.
+    *
+    * @param providerName Raw, unmodified provider name. Should there be an
+    *      Exception during construction, this parameter will be logged.
+    * @param constructor supplier-like function that will perform the construction
+    * @return the constructed provider, if successful; otherwise, null
+    *
+    * @param <T> type of the CredentialsProvider to construct
+    */
+   private static <T extends AWSCredentialsProvider> T constructProvider(
+           final String providerName, final CredentialsProviderConstructor<T> constructor) {
+       try {
+           return constructor.construct();
+       } catch (NoSuchMethodException ignored) {
+           // ignore
+       } catch (IllegalAccessException | InstantiationException | InvocationTargetException | RuntimeException e) {
+           log.warn("Failed to construct {}", providerName, e);
+       }
+       return null;
+   }
+
}
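The net effect on a multilang properties file: a provider entry may now carry constructor arguments, and the decoder walks explicit N-arg, `String[]`, then no-arg constructors. A hypothetical example (class names are placeholders):

```properties
# Tries FooProvider(String, String); then FooProvider(String[]); then FooProvider().
AWSCredentialsProvider = com.mycompany.FooProvider|argOne|argTwo
```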
@@ -20,7 +20,6 @@ import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;

import lombok.Getter;
import org.apache.commons.beanutils.ConvertUtilsBean;
@@ -58,10 +57,10 @@ public class BuilderDynaBean implements DynaBean {
    }

    public BuilderDynaBean(Class<?> destinedClass, ConvertUtilsBean convertUtilsBean,
-           Function<String, ?> emtpyPropertyHandler, List<String> classPrefixSearchList) {
+           Function<String, ?> emptyPropertyHandler, List<String> classPrefixSearchList) {
        this.convertUtilsBean = convertUtilsBean;
        this.classPrefixSearchList = classPrefixSearchList;
-       this.emptyPropertyHandler = emtpyPropertyHandler;
+       this.emptyPropertyHandler = emptyPropertyHandler;
        initialize(destinedClass);
    }
@@ -150,7 +149,6 @@ public class BuilderDynaBean implements DynaBean {
        } else {
            return expected.cast(dynaBeanCreateSupport.build());
        }
-
    }

    private void validateResolvedEmptyHandler() {
@@ -23,8 +23,9 @@ import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.beanutils.ConvertUtilsBean;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.arns.Arn;
+import software.amazon.kinesis.common.StreamIdentifier;

/**
 * KinesisClientLibConfigurator constructs a KinesisClientLibConfiguration from java properties file. The following
@@ -40,7 +41,6 @@ public class KinesisClientLibConfigurator {
    private final BeanUtilsBean utilsBean;
    private final MultiLangDaemonConfiguration configuration;

-
    /**
     * Constructor.
     */
@@ -55,13 +55,14 @@ public class KinesisClientLibConfigurator {
     * Program will fail immediately, if customer provide: 1) invalid variable value. Program will log it as warning and
     * continue, if customer provide: 1) variable with unsupported variable type. 2) a variable with name which does not
     * match any of the variables in KinesisClientLibConfiguration.
     *
     * @param properties a Properties object containing the configuration information
     * @return KinesisClientLibConfiguration
     */
    public MultiLangDaemonConfiguration getConfiguration(Properties properties) {
        properties.entrySet().forEach(e -> {
            try {
+               log.info("Processing (key={}, value={})", e.getKey(), e.getValue());
                utilsBean.setProperty(configuration, (String) e.getKey(), e.getValue());
            } catch (IllegalAccessException | InvocationTargetException ex) {
                throw new RuntimeException(ex);
@@ -69,8 +70,18 @@ public class KinesisClientLibConfigurator {
        });

        Validate.notBlank(configuration.getApplicationName(), "Application name is required");
-       Validate.notBlank(configuration.getStreamName(), "Stream name is required");
+
+       if (configuration.getStreamArn() != null && !configuration.getStreamArn().trim().isEmpty()) {
+           final Arn streamArnObj = Arn.fromString(configuration.getStreamArn());
+           StreamIdentifier.validateArn(streamArnObj);
+           // Parse out the stream Name from the Arn (and/or override existing value for Stream Name)
+           final String streamNameFromArn = streamArnObj.resource().resource();
+           configuration.setStreamName(streamNameFromArn);
+       }
+
+       Validate.notBlank(configuration.getStreamName(), "Stream name or Stream Arn is required. Stream Arn takes precedence if both are passed in.");
        Validate.isTrue(configuration.getKinesisCredentialsProvider().isDirty(), "A basic set of AWS credentials must be provided");

        return configuration;
    }
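For reference, the ARN-to-name extraction above leans on the AWS SDK v2 `Arn` type; a minimal sketch of the same calls in isolation:

```java
import software.amazon.awssdk.arns.Arn;

class ArnSketch {
    static String streamNameFrom(String streamArn) {
        // e.g. "arn:aws:kinesis:us-east-2:123456789012:stream/MyStream" -> "MyStream"
        Arn arn = Arn.fromString(streamArn);
        return arn.resource().resource();
    }
}
```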
@@ -96,5 +107,4 @@ public class KinesisClientLibConfigurator {
        return getConfiguration(properties);
    }

-
}
@@ -28,7 +28,6 @@ import java.util.UUID;
import java.util.function.Function;

import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.beanutils.ConvertUtils;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.beanutils.Converter;
import org.apache.commons.beanutils.converters.ArrayConverter;
@@ -73,6 +72,8 @@ public class MultiLangDaemonConfiguration {
    private String applicationName;

    private String streamName;
+   private String streamArn;
+
    @ConfigurationSettable(configurationClass = ConfigsBuilder.class)
    private String tableName;
@@ -157,7 +158,6 @@ public class MultiLangDaemonConfiguration {
        metricsEnabledDimensions = new HashSet<>(Arrays.asList(dimensions));
    }

-
    private RetrievalMode retrievalMode = RetrievalMode.DEFAULT;

    private final FanoutConfigBean fanoutConfig = new FanoutConfigBean();
@@ -169,7 +169,6 @@ public class MultiLangDaemonConfiguration {
    private long shutdownGraceMillis;
    private Integer timeoutInSeconds;

-
    private final BuilderDynaBean kinesisCredentialsProvider;

    public void setAWSCredentialsProvider(String providerString) {
@@ -403,4 +402,4 @@ public class MultiLangDaemonConfiguration {
        return resolvedConfiguration(shardRecordProcessorFactory).build();
    }

-}
+}
@@ -32,7 +32,8 @@ public class V2CredentialWrapper implements AwsCredentialsProvider {
    public AwsCredentials resolveCredentials() {
        AWSCredentials current = oldCredentialsProvider.getCredentials();
        if (current instanceof AWSSessionCredentials) {
-           return AwsSessionCredentials.create(current.getAWSAccessKeyId(), current.getAWSSecretKey(), ((AWSSessionCredentials) current).getSessionToken());
+           return AwsSessionCredentials.create(current.getAWSAccessKeyId(), current.getAWSSecretKey(),
+                   ((AWSSessionCredentials) current).getSessionToken());
        }
        return new AwsCredentials() {
            @Override
@@ -28,16 +28,15 @@ import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import software.amazon.kinesis.multilang.MessageReader;
import software.amazon.kinesis.multilang.messages.Message;
import software.amazon.kinesis.multilang.messages.StatusMessage;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MessageReaderTest {

-   private static final String shardId = "shard-123";
+   private static final String SHARD_ID = "shard-123";

-   /*
+   /**
     * This line is based on the definition of the protocol for communication between the KCL record processor and
     * the client's process.
     */
@@ -45,7 +44,7 @@ public class MessageReaderTest {
        return String.format("{\"action\":\"checkpoint\", \"checkpoint\":\"%s\"}", sequenceNumber);
    }

-   /*
+   /**
     * This line is based on the definition of the protocol for communication between the KCL record processor and
     * the client's process.
     */
@@ -80,10 +79,9 @@ public class MessageReaderTest {
        String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" };
        InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors);
        MessageReader reader =
-               new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+               new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());

        for (String responseFor : responseFors) {
            StatusMessage statusMessage = null;
            try {
                Message message = reader.getNextMessageFromSTDOUT().get();
                if (message instanceof StatusMessage) {
@@ -103,14 +101,14 @@ public class MessageReaderTest {
        InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors);

        MessageReader reader =
-               new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+               new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
        Future<Boolean> drainFuture = reader.drainSTDOUT();
        Boolean drainResult = drainFuture.get();
        Assert.assertNotNull(drainResult);
        Assert.assertTrue(drainResult);
    }

-   /*
+   /**
     * readValue should fail safely and just continue looping
     */
    @Test
@@ -135,7 +133,7 @@ public class MessageReaderTest {
        }

        MessageReader reader =
-               new MessageReader().initialize(bufferReader, shardId, new ObjectMapper(),
+               new MessageReader().initialize(bufferReader, SHARD_ID, new ObjectMapper(),
                        Executors.newCachedThreadPool());

        try {
@@ -150,7 +148,7 @@ public class MessageReaderTest {
    public void messageReaderBuilderTest() {
        InputStream stream = new ByteArrayInputStream("".getBytes());
        MessageReader reader =
-               new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+               new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
        Assert.assertNotNull(reader);
    }
@@ -159,7 +157,7 @@ public class MessageReaderTest {
        BufferedReader input = Mockito.mock(BufferedReader.class);
        Mockito.doThrow(IOException.class).when(input).readLine();
        MessageReader reader =
-               new MessageReader().initialize(input, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+               new MessageReader().initialize(input, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());

        Future<Message> readTask = reader.getNextMessageFromSTDOUT();
@@ -177,7 +175,7 @@ public class MessageReaderTest {
    public void noMoreMessagesTest() throws InterruptedException {
        InputStream stream = new ByteArrayInputStream("".getBytes());
        MessageReader reader =
-               new MessageReader().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+               new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
        Future<Message> future = reader.getNextMessageFromSTDOUT();

        try {
@@ -32,35 +32,30 @@ import org.mockito.Mockito;

import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
import software.amazon.kinesis.multilang.MessageWriter;
import software.amazon.kinesis.multilang.messages.LeaseLostMessage;
import software.amazon.kinesis.multilang.messages.Message;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.lifecycle.ShutdownReason;
import software.amazon.kinesis.retrieval.KinesisClientRecord;

import static org.mockito.Mockito.verify;

public class MessageWriterTest {

-   private static final String shardId = "shard-123";
+   private static final String SHARD_ID = "shard-123";
    MessageWriter messageWriter;
    OutputStream stream;

    @Rule
    public final ExpectedException thrown = ExpectedException.none();

-   // ExecutorService executor;
-
    @Before
    public void setup() {
        stream = Mockito.mock(OutputStream.class);
        messageWriter =
-               new MessageWriter().initialize(stream, shardId, new ObjectMapper(), Executors.newCachedThreadPool());
+               new MessageWriter().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool());
    }

    /*
@@ -86,7 +81,7 @@ public class MessageWriterTest {

    @Test
    public void writeInitializeMessageTest() throws IOException, InterruptedException, ExecutionException {
-       Future<Boolean> future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build());
+       Future<Boolean> future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build());
        future.get();
        verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(),
                Mockito.anyInt());
@@ -131,20 +126,20 @@ public class MessageWriterTest {
    @Test
    public void streamIOExceptionTest() throws IOException, InterruptedException, ExecutionException {
        Mockito.doThrow(IOException.class).when(stream).flush();
-       Future<Boolean> initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build());
+       Future<Boolean> initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build());
        Boolean result = initializeTask.get();
        Assert.assertNotNull(result);
        Assert.assertFalse(result);
    }

    @Test
-   public void objectMapperFails() throws JsonProcessingException, InterruptedException, ExecutionException {
+   public void objectMapperFails() throws JsonProcessingException {
        thrown.expect(RuntimeException.class);
        thrown.expectMessage("Encountered I/O error while writing LeaseLostMessage action to subprocess");

        ObjectMapper mapper = Mockito.mock(ObjectMapper.class);
        Mockito.doThrow(JsonProcessingException.class).when(mapper).writeValueAsString(Mockito.any(Message.class));
-       messageWriter = new MessageWriter().initialize(stream, shardId, mapper, Executors.newCachedThreadPool());
+       messageWriter = new MessageWriter().initialize(stream, SHARD_ID, mapper, Executors.newCachedThreadPool());

        messageWriter.writeLeaseLossMessage(LeaseLostInput.builder().build());
    }
@@ -157,7 +152,7 @@ public class MessageWriterTest {
        Assert.assertFalse(this.messageWriter.isOpen());
        try {
            // Any message should fail
-           this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build());
+           this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build());
            Assert.fail("MessageWriter should be closed and unable to write.");
        } catch (IllegalStateException e) {
            // This should happen.
@@ -14,17 +14,14 @@
 */
package software.amazon.kinesis.multilang;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.when;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Properties;

-import org.apache.commons.beanutils.BeanUtilsBean;
-import org.apache.commons.beanutils.ConvertUtilsBean;
import org.junit.Before;
+import software.amazon.awssdk.regions.Region;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
@@ -39,58 +36,160 @@ import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

@RunWith(MockitoJUnitRunner.class)
public class MultiLangDaemonConfigTest {
-   private static String FILENAME = "some.properties";
+   private static final String FILENAME = "multilang.properties";
+   private static final String EXE = "TestExe.exe";
+   private static final String APPLICATION_NAME = MultiLangDaemonConfigTest.class.getSimpleName();
+   private static final String STREAM_NAME = "fakeStream";
+   private static final String STREAM_NAME_IN_ARN = "FAKE_STREAM_NAME";
+   private static final Region REGION = Region.US_EAST_1;
+   private static final String STREAM_ARN = "arn:aws:kinesis:us-east-2:012345678987:stream/" + STREAM_NAME_IN_ARN;
+
+   @Mock
+   private ClassLoader classLoader;

    @Mock
    private AwsCredentialsProvider credentialsProvider;
    @Mock
    private AwsCredentials creds;
-   @Mock
-   private KinesisClientLibConfigurator configurator;
-
-   @Before
-   public void setup() {
-       ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
-       BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean);
-       MultiLangDaemonConfiguration multiLangDaemonConfiguration = new MultiLangDaemonConfiguration(utilsBean,
-               convertUtilsBean);
-       multiLangDaemonConfiguration.setApplicationName("cool-app");
-       multiLangDaemonConfiguration.setStreamName("cool-stream");
-       multiLangDaemonConfiguration.setWorkerIdentifier("cool-worker");
-       when(credentialsProvider.resolveCredentials()).thenReturn(creds);
-       when(creds.accessKeyId()).thenReturn("cool-user");
-       when(configurator.getConfiguration(any(Properties.class))).thenReturn(multiLangDaemonConfiguration);
-   }
+   private final KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();
+   private MultiLangDaemonConfig deamonConfig;

-   @Test
-   public void constructorTest() throws IOException {
-       String PROPERTIES = "executableName = randomEXE \n" + "applicationName = testApp \n"
-               + "streamName = fakeStream \n" + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n"
-               + "processingLanguage = malbolge";
-       ClassLoader classLoader = Mockito.mock(ClassLoader.class);
+   /**
+    * Instantiate a MultiLangDaemonConfig object
+    * @param streamName
+    * @param streamArn
+    * @throws IOException
+    */
+   public void setup(String streamName, String streamArn) throws IOException {
+       String properties = String.format("executableName = %s\n"
+                       + "applicationName = %s\n"
+                       + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n"
+                       + "processingLanguage = malbolge\n"
+                       + "regionName = %s\n",
+               EXE,
+               APPLICATION_NAME,
+               "us-east-1");

-       Mockito.doReturn(new ByteArrayInputStream(PROPERTIES.getBytes())).when(classLoader)
+       if (streamName != null) {
+           properties += String.format("streamName = %s\n", streamName);
+       }
+       if (streamArn != null) {
+           properties += String.format("streamArn = %s\n", streamArn);
+       }
+       classLoader = Mockito.mock(ClassLoader.class);
+
+       Mockito.doReturn(new ByteArrayInputStream(properties.getBytes())).when(classLoader)
                .getResourceAsStream(FILENAME);

-       MultiLangDaemonConfig deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
-
-       assertNotNull(deamonConfig.getExecutorService());
-       assertNotNull(deamonConfig.getMultiLangDaemonConfiguration());
-       assertNotNull(deamonConfig.getRecordProcessorFactory());
+       when(credentialsProvider.resolveCredentials()).thenReturn(creds);
+       when(creds.accessKeyId()).thenReturn("cool-user");
+       deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
+   }
+
+   @Test(expected = IllegalArgumentException.class)
+   public void testConstructorFailsBecauseStreamArnIsInvalid() throws Exception {
+       setup("", "this_is_not_a_valid_arn");
    }

+   @Test(expected = IllegalArgumentException.class)
+   public void testConstructorFailsBecauseStreamArnIsInvalid2() throws Exception {
+       setup("", "arn:aws:kinesis:us-east-2:ACCOUNT_ID:BadFormatting:stream/" + STREAM_NAME_IN_ARN);
+   }
+
+   @Test(expected = IllegalArgumentException.class)
+   public void testConstructorFailsBecauseStreamNameAndArnAreEmpty() throws Exception {
+       setup("", "");
+   }
+
+   @Test(expected = NullPointerException.class)
+   public void testConstructorFailsBecauseStreamNameAndArnAreNull() throws Exception {
+       setup(null, null);
+   }
+
+   @Test(expected = NullPointerException.class)
+   public void testConstructorFailsBecauseStreamNameIsNullAndArnIsEmpty() throws Exception {
+       setup(null, "");
+   }
+
+   @Test(expected = IllegalArgumentException.class)
+   public void testConstructorFailsBecauseStreamNameIsEmptyAndArnIsNull() throws Exception {
+       setup("", null);
+   }
+
    @Test
-   public void propertyValidation() {
-       String PROPERTIES_NO_EXECUTABLE_NAME = "applicationName = testApp \n" + "streamName = fakeStream \n"
+   public void testConstructorUsingStreamName() throws IOException {
+       setup(STREAM_NAME, null);
+
+       assertConfigurationsMatch(STREAM_NAME, null);
+   }
+
+   @Test
+   public void testConstructorUsingStreamNameAndStreamArnIsEmpty() throws IOException {
+       setup(STREAM_NAME, "");
+
+       assertConfigurationsMatch(STREAM_NAME, "");
+   }
+
+   @Test
+   public void testConstructorUsingStreamNameAndStreamArnIsWhitespace() throws IOException {
+       setup(STREAM_NAME, " ");
+
+       assertConfigurationsMatch(STREAM_NAME, "");
+   }
+
+   @Test
+   public void testConstructorUsingStreamArn() throws IOException {
+       setup(null, STREAM_ARN);
+
+       assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
+   }
+
+   @Test
+   public void testConstructorUsingStreamNameAsEmptyAndStreamArn() throws IOException {
+       setup("", STREAM_ARN);
+
+       assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
+   }
+
+   @Test
+   public void testConstructorUsingStreamArnOverStreamName() throws IOException {
+       setup(STREAM_NAME, STREAM_ARN);
+
+       assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
+   }
+
+   /**
+    * Verify the daemonConfig properties are what we expect them to be.
+    *
+    * @param expectedStreamName
+    */
+   private void assertConfigurationsMatch(String expectedStreamName, String expectedStreamArn) {
+       final MultiLangDaemonConfiguration multiLangConfiguration = deamonConfig.getMultiLangDaemonConfiguration();
+       assertNotNull(deamonConfig.getExecutorService());
+       assertNotNull(multiLangConfiguration);
+       assertNotNull(deamonConfig.getRecordProcessorFactory());
+
+       assertEquals(EXE, deamonConfig.getRecordProcessorFactory().getCommandArray()[0]);
+       assertEquals(APPLICATION_NAME, multiLangConfiguration.getApplicationName());
+       assertEquals(expectedStreamName, multiLangConfiguration.getStreamName());
+       assertEquals(REGION, multiLangConfiguration.getDynamoDbClient().get("region"));
+       assertEquals(REGION, multiLangConfiguration.getCloudWatchClient().get("region"));
+       assertEquals(REGION, multiLangConfiguration.getKinesisClient().get("region"));
+       assertEquals(expectedStreamArn, multiLangConfiguration.getStreamArn());
+   }
+
+   @Test
+   public void testPropertyValidation() {
+       String propertiesNoExecutableName = "applicationName = testApp \n" + "streamName = fakeStream \n"
                + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + "processingLanguage = malbolge";
        ClassLoader classLoader = Mockito.mock(ClassLoader.class);

-       Mockito.doReturn(new ByteArrayInputStream(PROPERTIES_NO_EXECUTABLE_NAME.getBytes())).when(classLoader)
+       Mockito.doReturn(new ByteArrayInputStream(propertiesNoExecutableName.getBytes())).when(classLoader)
                .getResourceAsStream(FILENAME);

-       MultiLangDaemonConfig config;
        try {
-           config = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
+           new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
            Assert.fail("Construction of the config should have failed due to property validation failing.");
        } catch (IllegalArgumentException e) {
            // Good
@@ -99,4 +198,14 @@ public class MultiLangDaemonConfigTest {
        }
    }

+   /**
+    * Test the loading of a "real" properties file. This test should catch
+    * any issues which might arise if there is a discrepancy between reality
+    * and mocking.
+    */
+   @Test
+   public void testActualPropertiesFile() throws Exception {
+       new MultiLangDaemonConfig(FILENAME);
+   }
+
}
@@ -17,7 +17,6 @@ package software.amazon.kinesis.multilang;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.anyObject;
@@ -31,7 +31,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
@@ -61,10 +60,8 @@ import software.amazon.kinesis.multilang.messages.ShardEndedMessage;
import software.amazon.kinesis.multilang.messages.StatusMessage;
import com.google.common.util.concurrent.SettableFuture;

-import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.lifecycle.ShutdownReason;
import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
import software.amazon.kinesis.retrieval.KinesisClientRecord;
@@ -106,7 +103,7 @@ public class MultiLangProtocolTest {
    }

    @Test
-   public void initializeTest() throws InterruptedException, ExecutionException {
+   public void testInitialize() {
        when(messageWriter
                .writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder()
                        .shardId(shardId).build())))).thenReturn(buildFuture(true));
@@ -116,7 +113,7 @@ public class MultiLangProtocolTest {
    }

    @Test
-   public void processRecordsTest() throws InterruptedException, ExecutionException {
+   public void testProcessRecords() {
        when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true));
        when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(
                new StatusMessage("processRecords"), Message.class));
@@ -131,7 +128,6 @@ public class MultiLangProtocolTest {
        when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage(LeaseLostMessage.ACTION), Message.class));

        assertThat(protocol.leaseLost(LeaseLostInput.builder().build()), equalTo(true));
-
    }

    @Test
@@ -177,7 +173,7 @@ public class MultiLangProtocolTest {
    }

    @Test
-   public void processRecordsWithCheckpointsTest() throws InterruptedException, ExecutionException,
+   public void testProcessRecordsWithCheckpoints() throws
            KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {

        when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true));
@@ -206,7 +202,7 @@ public class MultiLangProtocolTest {
    }

    @Test
-   public void processRecordsWithABadCheckpointTest() throws InterruptedException, ExecutionException {
+   public void testProcessRecordsWithABadCheckpoint() {
        when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true));
        when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(false));
        when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList<Message>() {
@@ -0,0 +1,112 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import com.amazonaws.regions.Regions;

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT;
import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT_REGION;
import static software.amazon.kinesis.multilang.NestedPropertyKey.EXTERNAL_ID;
import static software.amazon.kinesis.multilang.NestedPropertyKey.parse;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;

@RunWith(MockitoJUnitRunner.class)
public class NestedPropertyKeyTest {

    @Mock
    private NestedPropertyProcessor mockProcessor;

    @Test
    public void testExternalId() {
        final String expectedId = "eid";

        parse(mockProcessor, createKey(EXTERNAL_ID, expectedId));
        verify(mockProcessor).acceptExternalId(expectedId);
    }

    @Test
    public void testEndpoint() {
        final String expectedEndpoint = "https://sts.us-east-1.amazonaws.com";
        final String expectedRegion = "us-east-1";
        final String param = createKey(ENDPOINT, expectedEndpoint + "^" + expectedRegion);

        parse(mockProcessor, param);
        verify(mockProcessor).acceptEndpoint(expectedEndpoint, expectedRegion);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidEndpoint() {
        parse(mockProcessor, createKey(ENDPOINT, "value-sans-caret-delimiter"));
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidEndpointDoubleCaret() {
        parse(mockProcessor, createKey(ENDPOINT, "https://sts.us-east-1.amazonaws.com^us-east-1^borkbork"));
    }

    @Test
    public void testEndpointRegion() {
        final Regions expectedRegion = Regions.GovCloud;

        parse(mockProcessor, createKey(ENDPOINT_REGION, expectedRegion.getName()));
        verify(mockProcessor).acceptEndpointRegion(expectedRegion);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidEndpointRegion() {
        parse(mockProcessor, createKey(ENDPOINT_REGION, "snuffleupagus"));
    }

    /**
     * Test that the literal nested key (i.e., {@code key=} in {@code some_val|key=nested_val})
     * does not change. Any change to an existing literal key is not backwards-compatible.
     */
    @Test
    public void testKeysExplicitly() {
        // Adding a new enum will deliberately cause this assert to fail, and
        // therefore raise awareness for this explicit test. Add-and-remove may
        // keep the number unchanged yet will also break (by removing an enum).
        assertEquals(3, NestedPropertyKey.values().length);

        assertEquals("endpoint", ENDPOINT.getNestedKey());
        assertEquals("endpointRegion", ENDPOINT_REGION.getNestedKey());
        assertEquals("externalId", EXTERNAL_ID.getNestedKey());
    }

    @Test
    public void testNonmatchingParameters() {
        final String[] params = new String[] {
                null,
                "",
                "hello world", // no nested key
                "foo=bar", // nested key, but is not a recognized key
                createKey(EXTERNAL_ID, "eid") + "=extra", // valid key made invalid by second '='
        };
        parse(mockProcessor, params);
        verifyZeroInteractions(mockProcessor);
    }

    private static String createKey(final NestedPropertyKey key, final String value) {
        return key.getNestedKey() + "=" + value;
    }

}
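
The nested-key convention these tests pin down is easiest to see end to end. A minimal sketch, assuming only the NestedPropertyKey.parse(...) entry point and the three NestedPropertyProcessor callbacks exercised above; the collecting processor class below is hypothetical and not part of this commit.

    import com.amazonaws.regions.Regions;
    import software.amazon.kinesis.multilang.NestedPropertyKey;
    import software.amazon.kinesis.multilang.NestedPropertyProcessor;

    public class NestedPropertyKeyDemo {

        // Hypothetical processor that records what parse(...) hands back.
        static class CollectingProcessor implements NestedPropertyProcessor {
            String externalId;

            @Override
            public void acceptEndpoint(String endpoint, String region) { }

            @Override
            public void acceptEndpointRegion(Regions region) { }

            @Override
            public void acceptExternalId(String externalId) {
                this.externalId = externalId;
            }
        }

        public static void main(String[] args) {
            CollectingProcessor processor = new CollectingProcessor();
            // "externalId=eid" matches EXTERNAL_ID; "hello world" has no
            // nested key and is ignored, per testNonmatchingParameters.
            NestedPropertyKey.parse(processor, "externalId=eid", "hello world");
            System.out.println(processor.externalId); // prints: eid
        }
    }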
@@ -27,12 +27,10 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import software.amazon.kinesis.multilang.DrainChildSTDERRTask;
import software.amazon.kinesis.multilang.LineReaderTask;

public class ReadSTDERRTaskTest {

    private static final String shardId = "shard-123";
    private static final String SHARD_ID = "shard-123";
    private BufferedReader mockBufferReader;

    @Before

@@ -45,7 +43,7 @@ public class ReadSTDERRTaskTest {

        String errorMessages = "OMG\nThis is test message\n blah blah blah \n";
        InputStream stream = new ByteArrayInputStream(errorMessages.getBytes());
        LineReaderTask<Boolean> reader = new DrainChildSTDERRTask().initialize(stream, shardId, "");
        LineReaderTask<Boolean> reader = new DrainChildSTDERRTask().initialize(stream, SHARD_ID, "");
        Assert.assertNotNull(reader);
    }

@@ -54,7 +52,7 @@ public class ReadSTDERRTaskTest {
        String errorMessages = "OMG\nThis is test message\n blah blah blah \n";
        BufferedReader bufferReader =
                new BufferedReader(new InputStreamReader(new ByteArrayInputStream(errorMessages.getBytes())));
        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(bufferReader, shardId, "");
        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(bufferReader, SHARD_ID, "");
        Assert.assertNotNull(errorReader);

        Boolean result = errorReader.call();

@@ -67,7 +65,7 @@ public class ReadSTDERRTaskTest {
        } catch (IOException e) {
            Assert.fail("Not supposed to get an exception when we're just building our mock.");
        }
        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, shardId, "");
        LineReaderTask<Boolean> errorReader = new DrainChildSTDERRTask().initialize(mockBufferReader, SHARD_ID, "");
        Assert.assertNotNull(errorReader);
        Future<Boolean> result = Executors.newCachedThreadPool().submit(errorReader);
        Boolean finishedCleanly = null;

@@ -14,12 +14,9 @@
 */
package software.amazon.kinesis.multilang;

import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration;
import org.junit.Assert;
import org.junit.Test;

import software.amazon.kinesis.multilang.MultiLangRecordProcessorFactory;
import software.amazon.kinesis.multilang.MultiLangShardRecordProcessor;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;
import software.amazon.kinesis.processor.ShardRecordProcessor;
import org.junit.runner.RunWith;

@@ -23,7 +23,6 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collections;

@@ -46,9 +45,7 @@ import org.mockito.stubbing.Answer;
import com.fasterxml.jackson.databind.ObjectMapper;

import software.amazon.awssdk.services.kinesis.model.Record;
import software.amazon.kinesis.exceptions.InvalidStateException;
import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException;
import software.amazon.kinesis.exceptions.ShutdownException;
import software.amazon.kinesis.exceptions.ThrottlingException;
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.LeaseLostInput;

@@ -67,7 +64,7 @@ import software.amazon.kinesis.retrieval.KinesisClientRecord;
@RunWith(MockitoJUnitRunner.class)
public class StreamingShardRecordProcessorTest {

    private static final String shardId = "shard-123";
    private static final String SHARD_ID = "shard-123";

    private int systemExitCount = 0;

@@ -79,77 +76,73 @@ public class StreamingShardRecordProcessorTest {
    private RecordProcessorCheckpointer unimplementedCheckpointer = new RecordProcessorCheckpointer() {

        @Override
        public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException,
                ThrottlingException, ShutdownException {
        public void checkpoint() throws KinesisClientLibDependencyException, ThrottlingException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void checkpoint(String sequenceNumber) throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
                ThrottlingException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void checkpoint(Record record)
                throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException {
                throws KinesisClientLibDependencyException, ThrottlingException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void checkpoint(String sequenceNumber, long subSequenceNumber)
                throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException,
                IllegalArgumentException {
                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint()
                throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException {
                throws KinesisClientLibDependencyException, ThrottlingException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
        public PreparedCheckpointer prepareCheckpoint(byte[] applicationState)
                throws KinesisClientLibDependencyException, ThrottlingException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(Record record)
                throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException {
                throws KinesisClientLibDependencyException, ThrottlingException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
        public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState)
                throws KinesisClientLibDependencyException, ThrottlingException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber)
                throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState)
                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
            return null;
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber)
                throws KinesisClientLibDependencyException,
                InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException {
        public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState)
                throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException {
            throw new UnsupportedOperationException();
        }

@@ -171,7 +164,7 @@ public class StreamingShardRecordProcessorTest {
    private MultiLangDaemonConfiguration configuration;

    @Before
    public void prepare() throws IOException, InterruptedException, ExecutionException {
    public void prepare() throws InterruptedException, ExecutionException {
        // Fake command
        systemExitCount = 0;

@@ -230,7 +223,7 @@ public class StreamingShardRecordProcessorTest {

        List<KinesisClientRecord> testRecords = Collections.emptyList();

        recordProcessor.initialize(InitializationInput.builder().shardId(shardId).build());
        recordProcessor.initialize(InitializationInput.builder().shardId(SHARD_ID).build());
        recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords)
                .checkpointer(unimplementedCheckpointer).build());
        recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords)

@@ -240,7 +233,6 @@ public class StreamingShardRecordProcessorTest {

    @Test
    public void processorPhasesTest() throws InterruptedException, ExecutionException {

        Answer<StatusMessage> answer = new Answer<StatusMessage>() {

            StatusMessage[] answers = new StatusMessage[] { new StatusMessage(InitializeMessage.ACTION),

@@ -263,7 +255,7 @@ public class StreamingShardRecordProcessorTest {

        verify(messageWriter)
                .writeInitializeMessage(argThat(Matchers.withInit(
                        InitializationInput.builder().shardId(shardId).build())));
                        InitializationInput.builder().shardId(SHARD_ID).build())));
        verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class));
        verify(messageWriter).writeLeaseLossMessage(any(LeaseLostInput.class));
    }

@@ -295,7 +287,7 @@ public class StreamingShardRecordProcessorTest {
        phases(answer);

        verify(messageWriter).writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder()
                .shardId(shardId).build())));
                .shardId(SHARD_ID).build())));
        verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class));
        verify(messageWriter, never()).writeLeaseLossMessage(any(LeaseLostInput.class));
        Assert.assertEquals(1, systemExitCount);

@@ -0,0 +1,71 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang.auth;

import java.util.Arrays;

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class KclSTSAssumeRoleSessionCredentialsProviderTest {

    private static final String ARN = "arn";
    private static final String SESSION_NAME = "sessionName";

    /**
     * Test that the constructor doesn't throw an out-of-bounds exception if
     * there are no parameters beyond the required ARN and session name.
     */
    @Test
    public void testConstructorWithoutOptionalParams() {
        new KclSTSAssumeRoleSessionCredentialsProvider(new String[] { ARN, SESSION_NAME });
    }

    @Test
    public void testAcceptEndpoint() {
        // discovered exception during e2e testing; therefore, this test is
        // to simply verify the constructed STS client doesn't go *boom*
        final KclSTSAssumeRoleSessionCredentialsProvider provider =
                new KclSTSAssumeRoleSessionCredentialsProvider(ARN, SESSION_NAME);
        provider.acceptEndpoint("endpoint", "us-east-1");
    }

    @Test
    public void testVarArgs() {
        for (final String[] varargs : Arrays.asList(
                new String[] { ARN, SESSION_NAME, "externalId=eid", "foo"},
                new String[] { ARN, SESSION_NAME, "foo", "externalId=eid"}
        )) {
            final VarArgsSpy provider = new VarArgsSpy(varargs);
            assertEquals("eid", provider.externalId);
        }
    }

    private static class VarArgsSpy extends KclSTSAssumeRoleSessionCredentialsProvider {

        private String externalId;

        public VarArgsSpy(String[] args) {
            super(args);
        }

        @Override
        public void acceptExternalId(final String externalId) {
            this.externalId = externalId;
            super.acceptExternalId(externalId);
        }
    }
}
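
Read alongside NestedPropertyKeyTest, this suggests how the varargs constructor is meant to be fed: the first two elements are the role ARN and session name, and any later element may be a nested key=value pair. A sketch under that assumption (the ARN and external id below are placeholders):

    import software.amazon.kinesis.multilang.auth.KclSTSAssumeRoleSessionCredentialsProvider;

    public class AssumeRoleDemo {
        public static void main(String[] args) {
            // First two elements are required: role ARN, then session name.
            // Remaining elements are nested keys routed to acceptExternalId, etc.
            KclSTSAssumeRoleSessionCredentialsProvider provider =
                    new KclSTSAssumeRoleSessionCredentialsProvider(new String[] {
                            "arn:aws:iam::123456789012:role/ExampleRole", // placeholder ARN
                            "mySessionName",
                            "externalId=myExternalId", // placeholder external id
                    });
            // provider.getCredentials() would now assume the role via STS.
        }
    }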
@@ -17,6 +17,7 @@ package software.amazon.kinesis.multilang.config;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;

import java.util.Arrays;

@@ -27,24 +28,20 @@ import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeDiagnosingMatcher;
import org.junit.Test;
import software.amazon.kinesis.multilang.auth.KclSTSAssumeRoleSessionCredentialsProvider;

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSCredentialsProviderChain;

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;

public class AWSCredentialsProviderPropertyValueDecoderTest {

    private static final String TEST_ACCESS_KEY_ID = "123";
    private static final String TEST_SECRET_KEY = "456";

    private String credentialName1 = "software.amazon.kinesis.multilang.config.AWSCredentialsProviderPropertyValueDecoderTest$AlwaysSucceedCredentialsProvider";
    private String credentialName2 = "software.amazon.kinesis.multilang.config.AWSCredentialsProviderPropertyValueDecoderTest$ConstructorCredentialsProvider";
    private AWSCredentialsProviderPropertyValueDecoder decoder = new AWSCredentialsProviderPropertyValueDecoder();
    private final String credentialName1 = AlwaysSucceedCredentialsProvider.class.getName();
    private final String credentialName2 = ConstructorCredentialsProvider.class.getName();
    private final AWSCredentialsProviderPropertyValueDecoder decoder = new AWSCredentialsProviderPropertyValueDecoder();

    @ToString
    private static class AWSCredentialsMatcher extends TypeSafeDiagnosingMatcher<AWSCredentialsProvider> {

@@ -59,10 +56,6 @@ public class AWSCredentialsProviderPropertyValueDecoderTest {
            this.classMatcher = instanceOf(AWSCredentialsProviderChain.class);
        }

        private AWSCredentialsMatcher(AWSCredentials expected) {
            this(expected.getAWSAccessKeyId(), expected.getAWSSecretKey());
        }

        @Override
        protected boolean matchesSafely(AWSCredentialsProvider item, Description mismatchDescription) {
            AWSCredentials actual = item.getCredentials();

@@ -120,6 +113,33 @@ public class AWSCredentialsProviderPropertyValueDecoderTest {
        assertThat(provider, hasCredentials("arg1", "arg2"));
    }

    /**
     * Test that providers in the multi-lang auth package can be resolved and instantiated.
     */
    @Test
    public void testKclAuthProvider() {
        for (final String className : Arrays.asList(
                KclSTSAssumeRoleSessionCredentialsProvider.class.getName(), // fully-qualified name
                KclSTSAssumeRoleSessionCredentialsProvider.class.getSimpleName() // name-only; needs prefix
        )) {
            final AWSCredentialsProvider provider = decoder.decodeValue(className + "|arn|sessionName");
            assertNotNull(className, provider);
        }
    }

    /**
     * Test that a provider can be instantiated by its varargs constructor.
     */
    @Test
    public void testVarArgAuthProvider() {
        final String[] args = new String[] { "arg1", "arg2", "arg3" };
        final String className = VarArgCredentialsProvider.class.getName();
        final String encodedValue = className + "|" + String.join("|", args);

        final AWSCredentialsProvider provider = decoder.decodeValue(encodedValue);
        assertEquals(Arrays.toString(args), provider.getCredentials().getAWSAccessKeyId());
    }

    /**
     * This credentials provider will always succeed
     */

@@ -144,9 +164,9 @@ public class AWSCredentialsProviderPropertyValueDecoderTest {
        private String arg1;
        private String arg2;

        @SuppressWarnings("unused")
        public ConstructorCredentialsProvider(String arg1) {
            this.arg1 = arg1;
            this.arg2 = "blank";
            this(arg1, "blank");
        }

        public ConstructorCredentialsProvider(String arg1, String arg2) {

@@ -164,4 +184,25 @@ public class AWSCredentialsProviderPropertyValueDecoderTest {

        }
    }

    private static class VarArgCredentialsProvider implements AWSCredentialsProvider {

        private final String[] args;

        public VarArgCredentialsProvider(final String[] args) {
            this.args = args;
        }

        @Override
        public AWSCredentials getCredentials() {
            // KISS solution to surface the constructor args
            final String flattenedArgs = Arrays.toString(args);
            return new BasicAWSCredentials(flattenedArgs, flattenedArgs);
        }

        @Override
        public void refresh() {

        }
    }
}
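
The pipe-delimited encoding exercised here is the same one used for the AWSCredentialsProvider line in a multilang properties file: class name first, then constructor arguments. A sketch assuming decodeValue behaves as these tests show, and that the simple class name resolves because the multilang auth package is searched (the ARN and session name are placeholders; the demo sits in the decoder's own package in case the class is package-private):

    package software.amazon.kinesis.multilang.config;

    import com.amazonaws.auth.AWSCredentialsProvider;

    public class DecoderDemo {
        public static void main(String[] args) {
            AWSCredentialsProviderPropertyValueDecoder decoder =
                    new AWSCredentialsProviderPropertyValueDecoder();
            // Tokens after the class name are passed to a matching constructor.
            AWSCredentialsProvider provider = decoder.decodeValue(
                    "KclSTSAssumeRoleSessionCredentialsProvider"
                            + "|arn:aws:iam::123456789012:role/ExampleRole" // placeholder
                            + "|mySessionName");
            System.out.println(provider != null); // prints: true
        }
    }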
@@ -36,35 +36,25 @@ import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.junit.Rule;
import org.junit.Test;

import com.google.common.collect.ImmutableSet;

import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.metrics.MetricsLevel;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;

@RunWith(MockitoJUnitRunner.class)
public class KinesisClientLibConfiguratorTest {

    private String credentialName1 = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProvider";
    private String credentialName2 = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysFailCredentialsProvider";
    private String credentialNameKinesis = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderKinesis";
    private String credentialNameDynamoDB = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderDynamoDB";
    private String credentialNameCloudWatch = "software.amazon.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderCloudWatch";
    private KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();

    @Rule
    public final ExpectedException thrown = ExpectedException.none();

    @Mock
    private ShardRecordProcessorFactory shardRecordProcessorFactory;
    private final String credentialName1 = AlwaysSucceedCredentialsProvider.class.getName();
    private final String credentialName2 = AlwaysFailCredentialsProvider.class.getName();
    private final String credentialNameKinesis = AlwaysSucceedCredentialsProviderKinesis.class.getName();
    private final String credentialNameDynamoDB = AlwaysSucceedCredentialsProviderDynamoDB.class.getName();
    private final String credentialNameCloudWatch = AlwaysSucceedCredentialsProviderCloudWatch.class.getName();
    private final KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();

    @Test
    public void testWithBasicSetup() {

@@ -241,54 +231,32 @@ public class KinesisClientLibConfiguratorTest {
                "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123",
                "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 0",
                "retryGetRecordsInSeconds = 0" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        try {
            configurator.getConfiguration(input);
        } catch (Exception e) {
            fail("Don't expect to fail on invalid variable value");
        }
        getConfiguration(test);
    }

    @Test
    public void testWithInvalidIntValue() {
        String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100nf" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        try {
            configurator.getConfiguration(input);
        } catch (Exception e) {
            fail("Don't expect to fail on invalid variable value");
        }
        getConfiguration(test);
    }

    @Test
    public void testWithNegativeIntValue() {
        String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = -12" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            configurator.getConfiguration(input);
        } catch (Exception e) {
            fail("Don't expect to fail on invalid variable value");
        }
        getConfiguration(test);
    }

    @Test
    @Test(expected = IllegalArgumentException.class)
    public void testWithMissingCredentialsProvider() {
        thrown.expect(IllegalArgumentException.class);
        thrown.expectMessage("A basic set of AWS credentials must be provided");

        String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", "workerId = 123",
                "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        configurator.getConfiguration(input);
        getConfiguration(test);
    }

    @Test

@@ -297,35 +265,42 @@ public class KinesisClientLibConfiguratorTest {
                new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName1,
                        "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" },
                '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());
        MultiLangDaemonConfiguration config = configurator.getConfiguration(input);
        MultiLangDaemonConfiguration config = getConfiguration(test);

        // if workerId is not provided, configurator should assign one for it automatically
        assertNotNull(config.getWorkerIdentifier());
        assertFalse(config.getWorkerIdentifier().isEmpty());
    }

    @Test
    public void testWithMissingStreamName() {
        thrown.expect(NullPointerException.class);
        thrown.expectMessage("Stream name is required");

        String test = StringUtils.join(new String[] { "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        configurator.getConfiguration(input);
    @Test(expected = NullPointerException.class)
    public void testWithMissingStreamNameAndMissingStreamArn() {
        String test = StringUtils.join(new String[] {
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123",
                "failoverTimeMillis = 100" },
                '\n');
        getConfiguration(test);
    }

    @Test
    public void testWithMissingApplicationName() {
        thrown.expect(NullPointerException.class);
        thrown.expectMessage("Application name is required");
    @Test(expected = IllegalArgumentException.class)
    public void testWithEmptyStreamNameAndMissingStreamArn() {
        String test = StringUtils.join(new String[] {
                "applicationName = b",
                "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123",
                "failoverTimeMillis = 100",
                "streamName = ",
                "streamArn = "},
                '\n');
        getConfiguration(test);
    }

    @Test(expected = NullPointerException.class)
    public void testWithMissingApplicationName() {
        String test = StringUtils.join(new String[] { "streamName = a", "AWSCredentialsProvider = " + credentialName1,
                "workerId = 123", "failoverTimeMillis = 100" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());
        configurator.getConfiguration(input);
        getConfiguration(test);
    }

    @Test

@@ -334,11 +309,10 @@ public class KinesisClientLibConfiguratorTest {
                new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName2,
                        "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" },
                '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());
        MultiLangDaemonConfiguration config = getConfiguration(test);

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            MultiLangDaemonConfiguration config = configurator.getConfiguration(input);
            config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
            fail("expect failure with wrong credentials provider");
        } catch (Exception e) {

@@ -354,25 +328,12 @@ public class KinesisClientLibConfiguratorTest {
                "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB,
                "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, "failoverTimeMillis = 100",
                "shardSyncIntervalMillis = 500" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        MultiLangDaemonConfiguration config = configurator.getConfiguration(input);
        try {
            config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        } catch (Exception e) {
            fail("Kinesis credential providers should not fail.");
        }
        try {
            config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        } catch (Exception e) {
            fail("DynamoDB credential providers should not fail.");
        }
        try {
            config.getCloudWatchCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        } catch (Exception e) {
            fail("CloudWatch credential providers should not fail.");
        }
        final MultiLangDaemonConfiguration config = getConfiguration(test);
        config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        config.getCloudWatchCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
    }

    // TODO: fix this test

@@ -383,17 +344,10 @@ public class KinesisClientLibConfiguratorTest {
                "AWSCredentialsProviderDynamoDB = " + credentialName2,
                "AWSCredentialsProviderCloudWatch = " + credentialName2, "failoverTimeMillis = 100",
                "shardSyncIntervalMillis = 500" }, '\n');
        InputStream input = new ByteArrayInputStream(test.getBytes());

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        MultiLangDaemonConfiguration config = configurator.getConfiguration(input);
        try {
            config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        } catch (Exception e) {
            fail("Kinesis credential providers should not fail.");
        }
        final MultiLangDaemonConfiguration config = getConfiguration(test);
        config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
        try {
            config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials();
            fail("DynamoDB credential providers should fail.");

@@ -490,7 +444,6 @@ public class KinesisClientLibConfiguratorTest {

    private MultiLangDaemonConfiguration getConfiguration(String configString) {
        InputStream input = new ByteArrayInputStream(configString.getBytes());
        MultiLangDaemonConfiguration config = configurator.getConfiguration(input);
        return config;
        return configurator.getConfiguration(input);
    }
}
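
The refactor above funnels every test through a single getConfiguration(String) helper. For reference, the same flow outside the test class, assuming the getConfiguration(InputStream) API used throughout (stream and application names are placeholders):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;

    import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;
    import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

    public class ConfiguratorDemo {
        public static void main(String[] args) {
            String props = String.join("\n",
                    "streamName = myStream",   // placeholder
                    "applicationName = myApp", // placeholder
                    "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain");
            InputStream input = new ByteArrayInputStream(props.getBytes());
            MultiLangDaemonConfiguration config =
                    new KinesisClientLibConfigurator().getConfiguration(input);
            // No workerId was supplied, so the configurator assigns one,
            // as the workerId test above verifies.
            System.out.println(config.getWorkerIdentifier());
        }
    }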
@@ -26,13 +26,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.lifecycle.ShutdownReason;
import software.amazon.kinesis.multilang.messages.CheckpointMessage;
import software.amazon.kinesis.multilang.messages.InitializeMessage;
import software.amazon.kinesis.multilang.messages.Message;
import software.amazon.kinesis.multilang.messages.ProcessRecordsMessage;
import software.amazon.kinesis.multilang.messages.ShutdownMessage;
import software.amazon.kinesis.multilang.messages.ShutdownRequestedMessage;
import software.amazon.kinesis.multilang.messages.StatusMessage;
import software.amazon.kinesis.retrieval.KinesisClientRecord;

public class MessageTest {

@@ -56,7 +49,7 @@ public class MessageTest {
                new ProcessRecordsMessage(),
                new ShutdownRequestedMessage(),
                new LeaseLostMessage(),
                new ShardEndedMessage()
                new ShardEndedMessage(),
        };

        // TODO: fix this

@@ -0,0 +1,93 @@
# The script that abides by the multi-language protocol. This script will
# be executed by the MultiLangDaemon, which will communicate with this script
# over STDIN and STDOUT according to the multi-language protocol.
executableName = sample_kclpy_app.py

# The stream ARN: arn:aws:kinesis:<region>:<account id>:stream/<stream name>
# Important: streamArn takes precedence over streamName if both are set
streamArn = arn:aws:kinesis:us-east-5:000000000000:stream/kclpysample

# The name of an Amazon Kinesis stream to process.
# Important: streamArn takes precedence over streamName if both are set
streamName = kclpysample

# Used by the KCL as the name of this application. Will be used as the name
# of an Amazon DynamoDB table which will store the lease and checkpoint
# information for workers with this application name
applicationName = MultiLangTest

# Users can change the credentials provider the KCL will use to retrieve credentials.
# The DefaultAWSCredentialsProviderChain checks several other providers, which is
# described here:
# http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html
AWSCredentialsProvider = DefaultAWSCredentialsProviderChain

# Appended to the user agent of the KCL. Does not impact the functionality of the
# KCL in any other way.
processingLanguage = python/3.8

# Valid options are TRIM_HORIZON or LATEST.
# See http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
initialPositionInStream = TRIM_HORIZON

# To specify an initial timestamp from which to start processing records, please specify a timestamp value for 'initialPositionInStreamExtended',
# and uncomment the line below with the right timestamp value.
# See more from 'Timestamp' under http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
#initialPositionInStreamExtended = 1636609142

# The following properties are also available for configuring the KCL Worker that is created
# by the MultiLangDaemon.

# The KCL defaults to us-east-1
regionName = us-east-1

# Fail-over time in milliseconds. A worker which does not renew its lease within this time interval
# will be regarded as having problems, and its shards will be assigned to other workers.
# For applications that have a large number of shards, this may be set to a higher number to reduce
# the number of DynamoDB IOPS required for tracking leases
failoverTimeMillis = 10000

# A worker id that uniquely identifies this worker among all workers using the same applicationName
# If this isn't provided a MultiLangDaemon instance will assign a unique workerId to itself.
workerId = "workerId"

# Shard sync interval in milliseconds - i.e. wait for this long between shard sync tasks.
shardSyncIntervalMillis = 60000

# Max records to fetch from Kinesis in a single GetRecords call.
maxRecords = 10000

# Idle time between record reads in milliseconds.
idleTimeBetweenReadsInMillis = 1000

# Enables applications to flush/checkpoint (if they have some data "in progress" but don't get new data for a while)
callProcessRecordsEvenForEmptyRecordList = false

# Interval in milliseconds between polling to check for parent shard completion.
# Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on
# completion of parent shards).
parentShardPollIntervalMillis = 10000

# Clean up leases upon shard completion (don't wait until they expire in Kinesis).
# Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try
# to delete the ones we don't need any longer.
cleanupLeasesUponShardCompletion = true

# Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures).
taskBackoffTimeMillis = 500

# Buffer metrics for at most this long before publishing to CloudWatch.
metricsBufferTimeMillis = 10000

# Buffer at most this many metrics before publishing to CloudWatch.
metricsMaxQueueSize = 10000

# KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls
# to RecordProcessorCheckpointer#checkpoint(String) by default.
validateSequenceNumberBeforeCheckpointing = true

# The maximum number of active threads for the MultiLangDaemon to permit.
# If a value is provided then a FixedThreadPool is used with the maximum
# active threads set to the provided value. If a non-positive integer or no
# value is provided a CachedThreadPool is used.
maxActiveThreads = -1

@@ -22,7 +22,7 @@
    <parent>
        <groupId>software.amazon.kinesis</groupId>
        <artifactId>amazon-kinesis-client-pom</artifactId>
        <version>2.4.3</version>
        <version>2.5.3-SNAPSHOT</version>
    </parent>

    <artifactId>amazon-kinesis-client</artifactId>

@@ -50,8 +50,9 @@
        <sqlite4java.version>1.0.392</sqlite4java.version>
        <sqlite4java.native>libsqlite4java</sqlite4java.native>
        <sqlite4java.libpath>${project.build.directory}/test-lib</sqlite4java.libpath>
        <slf4j.version>2.0.0</slf4j.version>
        <gsr.version>1.1.13</gsr.version>
        <slf4j.version>2.0.7</slf4j.version>
        <gsr.version>1.1.14</gsr.version>
        <skipITs>true</skipITs>
    </properties>

    <dependencies>

@@ -88,12 +89,12 @@
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>31.1-jre</version>
            <version>32.1.1-jre</version>
        </dependency>
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
            <version>3.21.5</version>
            <version>3.21.12</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>

@@ -109,7 +110,7 @@
        <dependency>
            <groupId>io.reactivex.rxjava3</groupId>
            <artifactId>rxjava</artifactId>
            <version>3.1.5</version>
            <version>3.1.6</version>
        </dependency>

        <dependency>

@@ -183,7 +184,7 @@
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.8.1</version>
                <version>3.11.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>

@@ -199,6 +200,7 @@
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.22.2</version>
                <configuration>
                    <skipITs>${skipITs}</skipITs>
                    <excludes>
                        <exclude>**/*IntegrationTest.java</exclude>
                    </excludes>

@@ -207,6 +209,10 @@
                            <name>sqlite4java.library.path</name>
                            <value>${sqlite4java.libpath}</value>
                        </property>
                        <property>
                            <name>awsProfile</name>
                            <value>${awsProfile}</value>
                        </property>
                    </systemProperties>
                </configuration>
            </plugin>

@@ -299,7 +305,7 @@
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>3.3.1</version>
                <version>3.5.0</version>
                <configuration>
                    <excludePackageNames>com.amazonaws.services.kinesis.producer.protobuf</excludePackageNames>
                </configuration>

@@ -144,7 +144,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpointer
     * {@inheritDoc}
     */
    @Override
    public PreparedCheckpointer prepareCheckpoint(byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
    public PreparedCheckpointer prepareCheckpoint(byte[] applicationState)
            throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
        return prepareCheckpoint(largestPermittedCheckpointValue.sequenceNumber(), applicationState);
    }

@@ -152,7 +153,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpointer
     * {@inheritDoc}
     */
    @Override
    public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
    public PreparedCheckpointer prepareCheckpoint(Record record, byte[] applicationState)
            throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
        //
        // TODO: UserRecord Deprecation
        //

@@ -103,7 +103,8 @@ public class DynamoDBCheckpointer implements Checkpointer {
    }

    @Override
    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) throws KinesisClientLibException {
    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken,
            byte[] pendingCheckpointState) throws KinesisClientLibException {
        try {
            boolean wasSuccessful =
                    prepareCheckpoint(leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState);

@@ -15,6 +15,8 @@

package software.amazon.kinesis.common;

import java.util.function.Function;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;

@@ -23,6 +25,7 @@ import org.apache.commons.lang3.StringUtils;

import lombok.NonNull;
import lombok.experimental.Accessors;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;

@@ -35,6 +38,8 @@ import software.amazon.kinesis.metrics.MetricsConfig;
import software.amazon.kinesis.processor.ProcessorConfig;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;
import software.amazon.kinesis.retrieval.RetrievalConfig;

/**

@@ -46,9 +51,18 @@ public class ConfigsBuilder {
    /**
     * Either the name of the stream to consume records from
     * Or MultiStreamTracker for all the streams to consume records from
     *
     * @deprecated Both single- and multi-stream support is now provided by {@link StreamTracker}.
     * @see #streamTracker
     */
    @Deprecated
    private Either<MultiStreamTracker, String> appStreamTracker;

    /**
     * Stream(s) to be consumed by this KCL application.
     */
    private StreamTracker streamTracker;

    /**
     * Application name for the KCL Worker
     */

@@ -115,7 +129,8 @@ public class ConfigsBuilder {
    }

    /**
     * Constructor to initialize ConfigsBuilder with StreamName
     * Constructor to initialize ConfigsBuilder for a single stream identified by name.
     *
     * @param streamName
     * @param applicationName
     * @param kinesisClient

@@ -128,18 +143,19 @@ public class ConfigsBuilder {
            @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient,
            @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier,
            @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) {
        this.appStreamTracker = Either.right(streamName);
        this.applicationName = applicationName;
        this.kinesisClient = kinesisClient;
        this.dynamoDBClient = dynamoDBClient;
        this.cloudWatchClient = cloudWatchClient;
        this.workerIdentifier = workerIdentifier;
        this.shardRecordProcessorFactory = shardRecordProcessorFactory;
        this(new SingleStreamTracker(streamName),
                applicationName,
                kinesisClient,
                dynamoDBClient,
                cloudWatchClient,
                workerIdentifier,
                shardRecordProcessorFactory);
    }

    /**
     * Constructor to initialize ConfigsBuilder with MultiStreamTracker
     * @param multiStreamTracker
     * Constructor to initialize ConfigsBuilder for a single stream identified by {@link Arn}.
     *
     * @param streamArn
     * @param applicationName
     * @param kinesisClient
     * @param dynamoDBClient

@@ -147,17 +163,54 @@ public class ConfigsBuilder {
     * @param workerIdentifier
     * @param shardRecordProcessorFactory
     */
    public ConfigsBuilder(@NonNull MultiStreamTracker multiStreamTracker, @NonNull String applicationName,
    public ConfigsBuilder(@NonNull Arn streamArn, @NonNull String applicationName,
            @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient,
            @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier,
            @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) {
        this(new SingleStreamTracker(streamArn),
                applicationName,
                kinesisClient,
                dynamoDBClient,
                cloudWatchClient,
                workerIdentifier,
                shardRecordProcessorFactory);
    }

    /**
     * Constructor to initialize ConfigsBuilder
     *
     * @param streamTracker tracker for single- or multi-stream processing
     * @param applicationName
     * @param kinesisClient
     * @param dynamoDBClient
     * @param cloudWatchClient
     * @param workerIdentifier
     * @param shardRecordProcessorFactory
     */
    public ConfigsBuilder(@NonNull StreamTracker streamTracker, @NonNull String applicationName,
            @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient,
            @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier,
            @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) {
        this.appStreamTracker = Either.left(multiStreamTracker);
        this.applicationName = applicationName;
        this.kinesisClient = kinesisClient;
        this.dynamoDBClient = dynamoDBClient;
        this.cloudWatchClient = cloudWatchClient;
        this.workerIdentifier = workerIdentifier;
        this.shardRecordProcessorFactory = shardRecordProcessorFactory;

        // construct both streamTracker and appStreamTracker
        streamTracker(streamTracker);
    }

    public void appStreamTracker(Either<MultiStreamTracker, String> appStreamTracker) {
        this.appStreamTracker = appStreamTracker;
        streamTracker = appStreamTracker.map(Function.identity(), SingleStreamTracker::new);
    }

    public void streamTracker(StreamTracker streamTracker) {
        this.streamTracker = streamTracker;
        this.appStreamTracker = DeprecationUtils.convert(streamTracker,
                singleStreamTracker -> singleStreamTracker.streamConfigList().get(0).streamIdentifier().streamName());
    }

    /**

@@ -205,7 +258,6 @@ public class ConfigsBuilder {
        return new MetricsConfig(cloudWatchClient(), namespace());
    }

    /**
     * Creates a new instance of ProcessorConfig
     *

@@ -221,10 +273,6 @@ public class ConfigsBuilder {
     * @return RetrievalConfig
     */
    public RetrievalConfig retrievalConfig() {
        final RetrievalConfig retrievalConfig =
                appStreamTracker.map(
                        multiStreamTracker -> new RetrievalConfig(kinesisClient(), multiStreamTracker, applicationName()),
                        streamName -> new RetrievalConfig(kinesisClient(), streamName, applicationName()));
        return retrievalConfig;
        return new RetrievalConfig(kinesisClient(), streamTracker(), applicationName());
    }
}
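
A sketch of how the reworked constructors line up: the name-based and Arn-based constructors now delegate to the StreamTracker constructor via new SingleStreamTracker(...). The factory lambda and default clients below are placeholders, so this compiles but is not a production setup:

    import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
    import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
    import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
    import software.amazon.kinesis.common.ConfigsBuilder;
    import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
    import software.amazon.kinesis.processor.SingleStreamTracker;

    public class ConfigsBuilderDemo {
        public static void main(String[] args) {
            ShardRecordProcessorFactory factory = () -> null; // replace with a real factory
            ConfigsBuilder configsBuilder = new ConfigsBuilder(
                    new SingleStreamTracker("myStream"), // placeholder stream name
                    "myApp",                             // placeholder application name
                    KinesisAsyncClient.create(),
                    DynamoDbAsyncClient.create(),
                    CloudWatchAsyncClient.create(),
                    "worker-1",                          // placeholder worker id
                    factory);
            // The name-based and Arn-based constructors are equivalent to this
            // one, since both wrap their argument in a SingleStreamTracker.
            configsBuilder.retrievalConfig();
        }
    }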
@@ -0,0 +1,53 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.function.Function;

import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;

/**
 * Utility methods to facilitate deprecated code until that deprecated code
 * can be safely removed.
 */
public final class DeprecationUtils {

    private DeprecationUtils() {
        throw new UnsupportedOperationException("utility class");
    }

    /**
     * Converts a {@link StreamTracker} into the deprecated {@code Either<L, R>} convention.
     *
     * @param streamTracker tracker to convert
     */
    @Deprecated
    public static <R> Either<MultiStreamTracker, R> convert(
            StreamTracker streamTracker,
            Function<SingleStreamTracker, R> converter) {
        if (streamTracker instanceof MultiStreamTracker) {
            return Either.left((MultiStreamTracker) streamTracker);
        } else if (streamTracker instanceof SingleStreamTracker) {
            return Either.right(converter.apply((SingleStreamTracker) streamTracker));
        } else {
            throw new IllegalArgumentException("Unhandled StreamTracker: " + streamTracker);
        }
    }

}

@@ -23,10 +23,11 @@ import software.amazon.awssdk.services.kinesis.model.HashKeyRange;

import java.math.BigInteger;

@Value @Accessors(fluent = true)
/**
 * Lease POJO to hold the starting hashkey range and ending hashkey range of kinesis shards.
 */
@Accessors(fluent = true)
@Value
public class HashKeyRangeForLease {

    private final BigInteger startingHashKey;

@@ -16,11 +16,13 @@
package software.amazon.kinesis.common;

import lombok.Data;
import lombok.NonNull;
import lombok.experimental.Accessors;

@Data
@Accessors(fluent = true)
public class StreamConfig {
    @NonNull
    private final StreamIdentifier streamIdentifier;
    private final InitialPositionInStreamExtended initialPositionInStreamExtended;
    private String consumerArn;

@@ -15,45 +15,69 @@

package software.amazon.kinesis.common;

import com.google.common.base.Joiner;
import lombok.AccessLevel;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.experimental.Accessors;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.utils.Validate;

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@EqualsAndHashCode @Getter @Accessors(fluent = true)
@Builder(access = AccessLevel.PRIVATE)
@EqualsAndHashCode
@Getter
@Accessors(fluent = true)
public class StreamIdentifier {
    private final Optional<String> accountIdOptional;

    @Builder.Default
    private final Optional<String> accountIdOptional = Optional.empty();
    @NonNull
    private final String streamName;
    private final Optional<Long> streamCreationEpochOptional;
    @Builder.Default
    private final Optional<Long> streamCreationEpochOptional = Optional.empty();
    @Builder.Default
    @EqualsAndHashCode.Exclude
    private final Optional<Arn> streamArnOptional = Optional.empty();

    private static final String DELIMITER = ":";
    private static final Pattern PATTERN = Pattern.compile(".*" + ":" + ".*" + ":" + "[0-9]*");
    /**
     * Pattern for a serialized {@link StreamIdentifier}. The valid format is
     * {@code <accountId>:<streamName>:<creationEpoch>}.
     */
    private static final Pattern STREAM_IDENTIFIER_PATTERN = Pattern.compile(
            "(?<accountId>[0-9]+):(?<streamName>[^:]+):(?<creationEpoch>[0-9]+)");

    private StreamIdentifier(@NonNull String accountId, @NonNull String streamName, @NonNull Long streamCreationEpoch) {
        this.accountIdOptional = Optional.of(accountId);
        this.streamName = streamName;
        this.streamCreationEpochOptional = Optional.of(streamCreationEpoch);
    }

    private StreamIdentifier(@NonNull String streamName) {
        this.accountIdOptional = Optional.empty();
        this.streamName = streamName;
        this.streamCreationEpochOptional = Optional.empty();
    }
    /**
     * Pattern for a stream ARN. The valid format is
     * {@code arn:aws:kinesis:<region>:<accountId>:stream:<streamName>}
     * where {@code region} is the id representation of a {@link Region}.
     */
    private static final Pattern STREAM_ARN_PATTERN = Pattern.compile(
            "arn:aws[^:]*:kinesis:(?<region>[-a-z0-9]+):(?<accountId>[0-9]{12}):stream/(?<streamName>.+)");

    /**
     * Serialize the current StreamIdentifier instance.
     * @return
     *
     * @return a String of {@code account:stream:creationEpoch} in multi-stream mode
     * or {@link #streamName} in single-stream mode.
     */
    public String serialize() {
        return accountIdOptional.isPresent() ?
                Joiner.on(DELIMITER).join(accountIdOptional.get(), streamName, streamCreationEpochOptional.get()) :
                streamName;
        if (!streamCreationEpochOptional.isPresent()) {
            // creation epoch is expected to be empty in single-stream mode
            return streamName;
        }

        final char delimiter = ':';
        final StringBuilder sb = new StringBuilder()
                .append(accountIdOptional.get()).append(delimiter)
                .append(streamName).append(delimiter)
                .append(streamCreationEpochOptional.get());
        return sb.toString();
    }

    @Override
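The serialization contract above is easiest to see in isolation. Below is a minimal, self-contained sketch of the rule serialize() implements (emit accountId:streamName:creationEpoch when the multi-stream fields are present, otherwise just the stream name); the class name, account, stream name, and epoch are hypothetical and not from the KCL codebase.

import java.util.Optional;

public class SerializeSketch {
    public static void main(String[] args) {
        // Hypothetical values; in KCL these live inside StreamIdentifier.
        final Optional<String> accountId = Optional.of("123456789012");
        final Optional<Long> creationEpoch = Optional.of(1680000000L);
        final String streamName = "orders";

        // Multi-stream mode: <accountId>:<streamName>:<creationEpoch>;
        // single-stream mode has no creation epoch, so only the stream name is returned.
        final String serialized = creationEpoch.isPresent()
                ? accountId.get() + ":" + streamName + ":" + creationEpoch.get()
                : streamName;
        System.out.println(serialized); // 123456789012:orders:1680000000
    }
}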
@@ -62,27 +86,108 @@ public class StreamIdentifier {
    }

    /**
     * Create a multi stream instance for StreamIdentifier from serialized stream identifier.
     * The serialized stream identifier should be of the format account:stream:creationepoch
     * @param streamIdentifierSer
     * @return StreamIdentifier
     * Create a multi stream instance for StreamIdentifier from serialized stream identifier
     * of format {@link #STREAM_IDENTIFIER_PATTERN}
     *
     * @param streamIdentifierSer a String of {@code account:stream:creationEpoch}
     * @return StreamIdentifier with {@link #accountIdOptional} and {@link #streamCreationEpochOptional} present
     */
    public static StreamIdentifier multiStreamInstance(String streamIdentifierSer) {
        if (PATTERN.matcher(streamIdentifierSer).matches()) {
            final String[] split = streamIdentifierSer.split(DELIMITER);
            return new StreamIdentifier(split[0], split[1], Long.parseLong(split[2]));
        } else {
            throw new IllegalArgumentException("Unable to deserialize StreamIdentifier from " + streamIdentifierSer);
        final Matcher matcher = STREAM_IDENTIFIER_PATTERN.matcher(streamIdentifierSer);
        if (matcher.matches()) {
            final String accountId = matcher.group("accountId");
            final String streamName = matcher.group("streamName");
            final Long creationEpoch = Long.valueOf(matcher.group("creationEpoch"));

            validateCreationEpoch(creationEpoch);

            return StreamIdentifier.builder()
                    .accountIdOptional(Optional.of(accountId))
                    .streamName(streamName)
                    .streamCreationEpochOptional(Optional.of(creationEpoch))
                    .build();
        }

        throw new IllegalArgumentException("Unable to deserialize StreamIdentifier from " + streamIdentifierSer);
    }

    /**
     * Create a multi stream instance for StreamIdentifier from stream {@link Arn}.
     *
     * @param streamArn an {@link Arn} of format {@link #STREAM_ARN_PATTERN}
     * @param creationEpoch Creation epoch of the stream. This value will
     * reflect in the lease key and is assumed to be correct. (KCL could
     * verify, but that creates issues for both bootstrapping and, with large
     * KCL applications, API throttling against DescribeStreamSummary.)
     * If this epoch is reused for two identically-named streams in the same
     * account -- such as deleting and recreating a stream -- then KCL will
     * <b>be unable to differentiate leases between the old and new stream</b>
     * since the lease keys collide on this creation epoch.
     * @return StreamIdentifier with {@link #accountIdOptional}, {@link #streamCreationEpochOptional},
     * and {@link #streamArnOptional} present
     */
    public static StreamIdentifier multiStreamInstance(Arn streamArn, long creationEpoch) {
        validateArn(streamArn);
        validateCreationEpoch(creationEpoch);

        return StreamIdentifier.builder()
                .accountIdOptional(streamArn.accountId())
                .streamName(streamArn.resource().resource())
                .streamCreationEpochOptional(Optional.of(creationEpoch))
                .streamArnOptional(Optional.of(streamArn))
                .build();
    }

    /**
     * Create a single stream instance for StreamIdentifier from stream name.
     * @param streamName
     * @return StreamIdentifier
     *
     * @param streamName stream name of a Kinesis stream
     */
    public static StreamIdentifier singleStreamInstance(String streamName) {
        Validate.notEmpty(streamName, "StreamName should not be empty");
        return new StreamIdentifier(streamName);

        return StreamIdentifier.builder()
                .streamName(streamName)
                .build();
    }

    /**
     * Create a single stream instance for StreamIdentifier from AWS Kinesis stream {@link Arn}.
     *
     * @param streamArn AWS ARN of a Kinesis stream
     * @return StreamIdentifier with {@link #accountIdOptional} and {@link #streamArnOptional} present
     */
    public static StreamIdentifier singleStreamInstance(Arn streamArn) {
        validateArn(streamArn);

        return StreamIdentifier.builder()
                .accountIdOptional(streamArn.accountId())
                .streamName(streamArn.resource().resource())
                .streamArnOptional(Optional.of(streamArn))
                .build();
    }

    /**
     * Verify the streamArn follows the appropriate formatting.
     * Throw an exception if it does not.
     * @param streamArn
     */
    public static void validateArn(Arn streamArn) {
        if (!STREAM_ARN_PATTERN.matcher(streamArn.toString()).matches() || !streamArn.region().isPresent()) {
            throw new IllegalArgumentException("Invalid streamArn " + streamArn);
        }
    }

    /**
     * Verify creationEpoch is greater than 0.
     * Throw an exception if it is not.
     * @param creationEpoch
     */
    private static void validateCreationEpoch(long creationEpoch) {
        if (creationEpoch <= 0) {
            throw new IllegalArgumentException(
                    "Creation epoch must be > 0; received " + creationEpoch);
        }
    }

}
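The new multiStreamInstance(String) above parses with named capture groups rather than a blind split. A small standalone sketch of that parsing technique, with a hypothetical input value:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DeserializeSketch {
    // Same pattern as the diff above; the input below is hypothetical.
    private static final Pattern STREAM_IDENTIFIER_PATTERN = Pattern.compile(
            "(?<accountId>[0-9]+):(?<streamName>[^:]+):(?<creationEpoch>[0-9]+)");

    public static void main(String[] args) {
        final Matcher matcher = STREAM_IDENTIFIER_PATTERN.matcher("123456789012:orders:1680000000");
        if (matcher.matches()) {
            System.out.println(matcher.group("accountId"));      // 123456789012
            System.out.println(matcher.group("streamName"));     // orders
            System.out.println(matcher.group("creationEpoch"));  // 1680000000
        }
    }
}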
@@ -0,0 +1,38 @@
package software.amazon.kinesis.coordinator;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import lombok.extern.slf4j.Slf4j;

import software.amazon.kinesis.common.StreamIdentifier;

/**
 * This class is used for storing in-memory set of streams which are no longer existing (deleted) and needs to be
 * cleaned up from KCL's in memory state.
 */
@Slf4j
public class DeletedStreamListProvider {

    private final Set<StreamIdentifier> deletedStreams;

    public DeletedStreamListProvider() {
        deletedStreams = ConcurrentHashMap.newKeySet();
    }

    public void add(StreamIdentifier streamIdentifier) {
        log.info("Added {}", streamIdentifier);
        deletedStreams.add(streamIdentifier);
    }

    /**
     * Method returns and empties the current set of streams
     * @return set of deleted Streams
     */
    public Set<StreamIdentifier> purgeAllDeletedStream() {
        final Set<StreamIdentifier> response = new HashSet<>(deletedStreams);
        deletedStreams.removeAll(response);
        return response;
    }
}
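The purge-on-read behavior of purgeAllDeletedStream() above is worth noting: it snapshots the concurrent set, then removes only the snapshotted members, so an identifier added concurrently during a purge survives for the next purge. A standalone sketch of the same pattern, using plain strings in place of StreamIdentifier:

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class PurgeSketch {
    private static final Set<String> deletedStreams = ConcurrentHashMap.newKeySet();

    static Set<String> purgeAll() {
        final Set<String> snapshot = new HashSet<>(deletedStreams); // copy current contents
        deletedStreams.removeAll(snapshot);                         // drain only what was copied
        return snapshot;
    }

    public static void main(String[] args) {
        deletedStreams.add("stream-a");
        deletedStreams.add("stream-b");
        System.out.println(purgeAll());               // stream-a and stream-b (iteration order unspecified)
        System.out.println(deletedStreams.isEmpty()); // true
    }
}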
@@ -16,15 +16,15 @@ package software.amazon.kinesis.coordinator;
public class NoOpWorkerStateChangeListener implements WorkerStateChangeListener {

        /**
         * Empty constructor for NoOp Worker State Change Listener
         */
        public NoOpWorkerStateChangeListener() {
    /**
     * Empty constructor for NoOp Worker State Change Listener
     */
    public NoOpWorkerStateChangeListener() {

        }
    }

        @Override
        public void onWorkerStateChange(WorkerState newState) {
    @Override
    public void onWorkerStateChange(WorkerState newState) {

        }
    }
}
@@ -50,6 +50,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;

@@ -76,7 +77,7 @@ class PeriodicShardSyncManager {
    @VisibleForTesting
    static final BigInteger MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE);
    static final String PERIODIC_SHARD_SYNC_MANAGER = "PeriodicShardSyncManager";
    private Map<StreamIdentifier, HashRangeHoleTracker> hashRangeHoleTrackerMap = new HashMap<>();
    private final Map<StreamIdentifier, HashRangeHoleTracker> hashRangeHoleTrackerMap = new HashMap<>();

    private final String workerId;
    private final LeaderDecider leaderDecider;
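The MAX_HASH_KEY constant above is 2^128 - 1, the top of the Kinesis hash key space (partition keys are hashed with MD5, a 128-bit digest). A quick standalone check of that arithmetic:

import java.math.BigInteger;

public class MaxHashKeySketch {
    public static void main(String[] args) {
        // Same computation as the constant above.
        final BigInteger max = BigInteger.valueOf(2).pow(128).subtract(BigInteger.ONE);
        System.out.println(max);             // 340282366920938463463374607431768211455
        System.out.println(max.bitLength()); // 128
    }
}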
@@ -142,15 +143,12 @@
    /**
     * Runs shardSync once
     * Does not schedule periodic shardSync
     * @return the result of the task
     */
    public synchronized void syncShardsOnce() throws Exception {
        // TODO: Resume the shard sync from failed stream in the next attempt, to avoid syncing
        // TODO: for already synced streams
        for(Map.Entry<StreamIdentifier, StreamConfig> streamConfigEntry : currentStreamConfigMap.entrySet()) {
            final StreamIdentifier streamIdentifier = streamConfigEntry.getKey();
            log.info("Syncing Kinesis shard info for " + streamIdentifier);
            final StreamConfig streamConfig = streamConfigEntry.getValue();
        for (StreamConfig streamConfig : currentStreamConfigMap.values()) {
            log.info("Syncing Kinesis shard info for {}", streamConfig);
            final ShardSyncTaskManager shardSyncTaskManager = shardSyncTaskManagerProvider.apply(streamConfig);
            final TaskResult taskResult = shardSyncTaskManager.callShardSyncTask();
            if (taskResult.getException() != null) {

@@ -181,30 +179,45 @@
        final long runStartMillis = System.currentTimeMillis();

        try {
            // Create a copy of the streams to be considered for this run to avoid data race with Scheduler.
            final Set<StreamIdentifier> streamConfigMap = new HashSet<>(currentStreamConfigMap.keySet());

            // Construct the stream to leases map to be used in the lease sync
            final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = getStreamToLeasesMap(
                    currentStreamConfigMap.keySet());
            final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = getStreamToLeasesMap(streamConfigMap);

            // For each of the stream, check if shard sync needs to be done based on the leases state.
            for (Map.Entry<StreamIdentifier, StreamConfig> streamConfigEntry : currentStreamConfigMap.entrySet()) {
                final ShardSyncResponse shardSyncResponse = checkForShardSync(streamConfigEntry.getKey(),
                        streamToLeasesMap.get(streamConfigEntry.getKey()));
            for (StreamIdentifier streamIdentifier : streamConfigMap) {
                if (!currentStreamConfigMap.containsKey(streamIdentifier)) {
                    log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier);
                    continue;
                }
                final ShardSyncResponse shardSyncResponse = checkForShardSync(streamIdentifier,
                        streamToLeasesMap.get(streamIdentifier));

                numStreamsWithPartialLeases += shardSyncResponse.isHoleDetected() ? 1 : 0;
                numStreamsToSync += shardSyncResponse.shouldDoShardSync ? 1 : 0;

                if (shardSyncResponse.shouldDoShardSync()) {
                    log.info("Periodic shard syncer initiating shard sync for {} due to the reason - {} ",
                            streamConfigEntry.getKey(), shardSyncResponse.reasonForDecision());
                            streamIdentifier, shardSyncResponse.reasonForDecision());
                    final StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier);
                    if (streamConfig == null) {
                        log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier);
                        continue;
                    }
                    final ShardSyncTaskManager shardSyncTaskManager = shardSyncTaskManagerProvider
                            .apply(streamConfigEntry.getValue());
                            .apply(streamConfig);
                    if (!shardSyncTaskManager.submitShardSyncTask()) {
                        log.warn(
                                "Failed to submit shard sync task for stream {}. This could be due to the previous pending shard sync task.",
                                shardSyncTaskManager.shardDetector().streamIdentifier().streamName());
                    } else {
                        log.info("Submitted shard sync task for stream {} because of reason {}",
                                shardSyncTaskManager.shardDetector().streamIdentifier().streamName(),
                                shardSyncResponse.reasonForDecision());
                    }
                } else {
                    log.info("Skipping shard sync for {} due to the reason - {}", streamConfigEntry.getKey(),
                    log.info("Skipping shard sync for {} due to the reason - {}", streamIdentifier,
                            shardSyncResponse.reasonForDecision());
                }
            }

@@ -222,6 +235,14 @@
        }
    }

    /**
     * Retrieve all the streams, along with their associated leases
     * @param streamIdentifiersToFilter
     * @return
     * @throws DependencyException
     * @throws ProvisionedThroughputException
     * @throws InvalidStateException
     */
    private Map<StreamIdentifier, List<Lease>> getStreamToLeasesMap(
            final Set<StreamIdentifier> streamIdentifiersToFilter)
            throws DependencyException, ProvisionedThroughputException, InvalidStateException {

@@ -242,6 +263,13 @@
        }
    }


    /**
     * Given a list of leases for a stream, determine if a shard sync is necessary.
     * @param streamIdentifier
     * @param leases
     * @return
     */
    @VisibleForTesting
    ShardSyncResponse checkForShardSync(StreamIdentifier streamIdentifier, List<Lease> leases) {
        if (CollectionUtils.isNullOrEmpty(leases)) {

@@ -264,7 +292,6 @@
                    "Detected same hole for " + hashRangeHoleTracker.getNumConsecutiveHoles()
                            + " times. Shard sync will be initiated when threshold reaches "
                            + leasesRecoveryAuditorInconsistencyConfidenceThreshold);

        } else {
            // If hole is not present, clear any previous tracking for this stream and return false;
            hashRangeHoleTrackerMap.remove(streamIdentifier);

@@ -272,12 +299,24 @@
        }
    }

    /**
     * Object containing metadata about the state of a shard sync
     */
    @Value
    @Accessors(fluent = true)
    @VisibleForTesting
    static class ShardSyncResponse {

        /**
         * Flag to determine if a shard sync is necessary or not
         */
        private final boolean shouldDoShardSync;

        private final boolean isHoleDetected;

        /**
         * Reason behind the state of 'shouldDoShardSync' flag
         */
        private final String reasonForDecision;
    }

@@ -309,7 +348,7 @@
                ((MultiStreamLease) lease).shardId() :
                lease.leaseKey();
        final Shard shard = kinesisShards.get(shardId);
        if(shard == null) {
        if (shard == null) {
            return lease;
        }
        lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange()));

@@ -333,7 +372,7 @@
            List<Lease> leasesWithHashKeyRanges) {
        // Sort the hash ranges by starting hash key.
        List<Lease> sortedLeasesWithHashKeyRanges = sortLeasesByHashRange(leasesWithHashKeyRanges);
        if(sortedLeasesWithHashKeyRanges.isEmpty()) {
        if (sortedLeasesWithHashKeyRanges.isEmpty()) {
            log.error("No leases with valid hashranges found for stream {}", streamIdentifier);
            return Optional.of(new HashRangeHole());
        }

@@ -378,8 +417,9 @@

    @VisibleForTesting
    static List<Lease> sortLeasesByHashRange(List<Lease> leasesWithHashKeyRanges) {
        if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1)
        if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1) {
            return leasesWithHashKeyRanges;
        }
        Collections.sort(leasesWithHashKeyRanges, new HashKeyRangeComparator());
        return leasesWithHashKeyRanges;
    }
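sortLeasesByHashRange() feeds the hole detection above: once leases are ordered by starting hash key, a hole exists wherever the next range does not start exactly one past the previous range's end. A simplified, hypothetical sketch of that check (real leases carry HashKeyRangeForLease objects; bare BigInteger pairs stand in here):

import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;

public class HoleCheckSketch {
    public static void main(String[] args) {
        // Hypothetical sorted ranges as {start, end} pairs; a hole sits between 99 and 200.
        final List<BigInteger[]> sortedRanges = Arrays.asList(
                new BigInteger[] {BigInteger.ZERO, new BigInteger("99")},
                new BigInteger[] {new BigInteger("200"),
                        new BigInteger("340282366920938463463374607431768211455")});

        for (int i = 1; i < sortedRanges.size(); i++) {
            // Contiguity requires next.start == previous.end + 1.
            final BigInteger expectedStart = sortedRanges.get(i - 1)[1].add(BigInteger.ONE);
            if (!sortedRanges.get(i)[0].equals(expectedStart)) {
                System.out.println("Hole detected before range starting at " + sortedRanges.get(i)[0]);
            }
        }
    }
}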
@@ -41,7 +41,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;

@@ -54,8 +53,6 @@ import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.utils.Validate;
import software.amazon.kinesis.checkpoint.CheckpointConfig;
import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamConfig;
import software.amazon.kinesis.common.StreamIdentifier;
import software.amazon.kinesis.leases.HierarchicalShardSyncer;

@@ -75,7 +72,6 @@ import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseSerializer;
import software.amazon.kinesis.leases.dynamodb.DynamoDBMultiStreamLeaseSerializer;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.leases.exceptions.InvalidStateException;
import software.amazon.kinesis.leases.exceptions.LeasingException;
import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
import software.amazon.kinesis.lifecycle.LifecycleConfig;
import software.amazon.kinesis.lifecycle.ShardConsumer;

@@ -83,7 +79,6 @@ import software.amazon.kinesis.lifecycle.ShardConsumerArgument;
import software.amazon.kinesis.lifecycle.ShardConsumerShutdownNotification;
import software.amazon.kinesis.lifecycle.ShutdownNotification;
import software.amazon.kinesis.lifecycle.ShutdownReason;
import software.amazon.kinesis.lifecycle.TaskResult;
import software.amazon.kinesis.metrics.CloudWatchMetricsFactory;
import software.amazon.kinesis.metrics.MetricsConfig;
import software.amazon.kinesis.metrics.MetricsFactory;

@@ -92,16 +87,17 @@ import software.amazon.kinesis.metrics.MetricsScope;
import software.amazon.kinesis.metrics.MetricsUtil;
import software.amazon.kinesis.processor.Checkpointer;
import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.ProcessorConfig;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.processor.ShutdownNotificationAware;
import software.amazon.kinesis.processor.StreamTracker;
import software.amazon.kinesis.retrieval.AggregatorUtil;
import software.amazon.kinesis.retrieval.RecordsPublisher;
import software.amazon.kinesis.retrieval.RetrievalConfig;
import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder;

import static software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.StreamsLeasesDeletionType;
import static software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.StreamsLeasesDeletionType.FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION;

/**
 *

@@ -113,16 +109,17 @@ public class Scheduler implements Runnable {

    private static final int PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT = 1;
    private static final long LEASE_TABLE_CHECK_FREQUENCY_MILLIS = 3 * 1000L;
    private static final long MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS = 1 * 1000L;
    private static final long MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS = 1000L;
    private static final long MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS = 30 * 1000L;
    private static final long NEW_STREAM_CHECK_INTERVAL_MILLIS = 1 * 60 * 1000L;
    private static final long NEW_STREAM_CHECK_INTERVAL_MILLIS = 60_000L;
    private static final boolean SHOULD_DO_LEASE_SYNC_FOR_OLD_STREAMS = false;
    private static final String MULTI_STREAM_TRACKER = "MultiStreamTracker";
    private static final String ACTIVE_STREAMS_COUNT = "ActiveStreams.Count";
    private static final String PENDING_STREAMS_DELETION_COUNT = "StreamsPendingDeletion.Count";
    private static final String DELETED_STREAMS_COUNT = "DeletedStreams.Count";
    private static final String NON_EXISTING_STREAM_DELETE_COUNT = "NonExistingStreamDelete.Count";

    private SchedulerLog slog = new SchedulerLog();
    private final SchedulerLog slog = new SchedulerLog();

    private final CheckpointConfig checkpointConfig;
    private final CoordinatorConfig coordinatorConfig;

@@ -142,7 +139,6 @@
    private final ExecutorService executorService;
    private final DiagnosticEventFactory diagnosticEventFactory;
    private final DiagnosticEventHandler diagnosticEventHandler;
    // private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy;
    private final LeaseCoordinator leaseCoordinator;
    private final Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider;
    private final Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap = new HashMap<>();

@@ -156,10 +152,9 @@
    private final long failoverTimeMillis;
    private final long taskBackoffTimeMillis;
    private final boolean isMultiStreamMode;
    private final Map<StreamIdentifier, StreamConfig> currentStreamConfigMap;
    private MultiStreamTracker multiStreamTracker;
    private FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy;
    private InitialPositionInStreamExtended orphanedStreamInitialPositionInStream;
    private final Map<StreamIdentifier, StreamConfig> currentStreamConfigMap = new ConcurrentHashMap<>();
    private final StreamTracker streamTracker;
    private final FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy;
    private final long listShardsBackoffTimeMillis;
    private final int maxListShardsRetryAttempts;
    private final LeaseRefresher leaseRefresher;

@@ -173,9 +168,11 @@
    private final LeaseCleanupManager leaseCleanupManager;
    private final SchemaRegistryDecoder schemaRegistryDecoder;

    private final DeletedStreamListProvider deletedStreamListProvider;

    // Holds consumers for shards the worker is currently tracking. Key is shard
    // info, value is ShardConsumer.
    private ConcurrentMap<ShardInfo, ShardConsumer> shardInfoShardConsumerMap = new ConcurrentHashMap<>();
    private final ConcurrentMap<ShardInfo, ShardConsumer> shardInfoShardConsumerMap = new ConcurrentHashMap<>();

    private volatile boolean shutdown;
    private volatile long shutdownStartTimeMillis;

@@ -183,8 +180,11 @@

    private final Object lock = new Object();

    private Stopwatch streamSyncWatch = Stopwatch.createUnstarted();
    private final Stopwatch streamSyncWatch = Stopwatch.createUnstarted();

    private boolean leasesSyncedOnAppInit = false;
    @Getter(AccessLevel.NONE)
    private boolean shouldSyncLeases = true;

    /**
     * Used to ensure that only one requestedShutdown is in progress at a time.
@@ -226,18 +226,13 @@
        this.retrievalConfig = retrievalConfig;

        this.applicationName = this.coordinatorConfig.applicationName();
        this.isMultiStreamMode = this.retrievalConfig.appStreamTracker().map(
                multiStreamTracker -> true, streamConfig -> false);
        this.currentStreamConfigMap = this.retrievalConfig.appStreamTracker().map(
                multiStreamTracker -> {
                    this.multiStreamTracker = multiStreamTracker;
                    this.formerStreamsLeasesDeletionStrategy = multiStreamTracker.formerStreamsLeasesDeletionStrategy();
                    this.orphanedStreamInitialPositionInStream = multiStreamTracker.orphanedStreamInitialPositionInStream();
                    return multiStreamTracker.streamConfigList().stream()
                            .collect(Collectors.toMap(sc -> sc.streamIdentifier(), sc -> sc));
                },
                streamConfig ->
                        Collections.singletonMap(streamConfig.streamIdentifier(), streamConfig));
        this.streamTracker = retrievalConfig.streamTracker();
        this.isMultiStreamMode = streamTracker.isMultiStream();
        this.formerStreamsLeasesDeletionStrategy = streamTracker.formerStreamsLeasesDeletionStrategy();
        streamTracker.streamConfigList().forEach(
                sc -> currentStreamConfigMap.put(sc.streamIdentifier(), sc));
        log.info("Initial state: {}", currentStreamConfigMap.values());

        this.maxInitializationAttempts = this.coordinatorConfig.maxInitializationAttempts();
        this.metricsFactory = this.metricsConfig.metricsFactory();
        // Determine leaseSerializer based on availability of MultiStreamTracker.

@@ -263,9 +258,10 @@
        this.executorService = this.coordinatorConfig.coordinatorFactory().createExecutorService();
        this.diagnosticEventFactory = diagnosticEventFactory;
        this.diagnosticEventHandler = new DiagnosticEventLogger();
        this.deletedStreamListProvider = new DeletedStreamListProvider();
        this.shardSyncTaskManagerProvider = streamConfig -> this.leaseManagementConfig
                .leaseManagementFactory(leaseSerializer, isMultiStreamMode)
                .createShardSyncTaskManager(this.metricsFactory, streamConfig);
                .createShardSyncTaskManager(this.metricsFactory, streamConfig, this.deletedStreamListProvider);
        this.shardPrioritization = this.coordinatorConfig.shardPrioritization();
        this.cleanupLeasesUponShardCompletion = this.leaseManagementConfig.cleanupLeasesUponShardCompletion();
        this.skipShardSyncAtWorkerInitializationIfLeasesExist =

@@ -287,8 +283,6 @@
                PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT);
        this.failoverTimeMillis = this.leaseManagementConfig.failoverTimeMillis();
        this.taskBackoffTimeMillis = this.lifecycleConfig.taskBackoffTimeMillis();
        // this.retryGetRecordsInSeconds = this.retrievalConfig.retryGetRecordsInSeconds();
        // this.maxGetRecordsThreadPool = this.retrievalConfig.maxGetRecordsThreadPool();
        this.listShardsBackoffTimeMillis = this.retrievalConfig.listShardsBackoffTimeInMillis();
        this.maxListShardsRetryAttempts = this.retrievalConfig.maxListShardsRetryAttempts();
        this.shardDetectorProvider = streamConfig -> createOrGetShardSyncTaskManager(streamConfig).shardDetector();

@@ -344,11 +338,9 @@

        for (int i = 0; (!isDone) && (i < maxInitializationAttempts); i++) {
            try {
                log.info("Initialization attempt {}", (i + 1));
                log.info("Initializing LeaseCoordinator");
                log.info("Initializing LeaseCoordinator attempt {}", (i + 1));
                leaseCoordinator.initialize();

                TaskResult result;
                if (!skipShardSyncAtWorkerInitializationIfLeasesExist || leaseRefresher.isLeaseTableEmpty()) {
                if (shouldInitiateLeaseSync()) {
                    log.info("Worker {} is initiating the lease sync.", leaseManagementConfig.workerIdentifier());

@@ -429,6 +421,8 @@
                // check for new streams and sync with the scheduler state
                if (isLeader()) {
                    checkAndSyncStreamShardsAndLeases();
                } else {
                    shouldSyncLeases = true;
                }

                logExecutorState();

@@ -436,7 +430,7 @@
                Thread.sleep(shardConsumerDispatchPollIntervalMillis);
            } catch (Exception e) {
                log.error("Worker.run caught exception, sleeping for {} milli seconds!",
                        String.valueOf(shardConsumerDispatchPollIntervalMillis), e);
                        shardConsumerDispatchPollIntervalMillis, e);
                try {
                    Thread.sleep(shardConsumerDispatchPollIntervalMillis);
                } catch (InterruptedException ex) {

@@ -450,7 +444,6 @@
        return leaderDecider.isLeader(leaseManagementConfig.workerIdentifier());
    }


    /**
     * Note: This method has package level access solely for testing purposes.
     * Sync all streams method.
@@ -465,34 +458,29 @@
        final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, MULTI_STREAM_TRACKER);

        try {

            final Map<StreamIdentifier, StreamConfig> newStreamConfigMap = new HashMap<>();
            final Duration waitPeriodToDeleteOldStreams = formerStreamsLeasesDeletionStrategy.waitPeriodToDeleteFormerStreams();
            // Making an immutable copy
            newStreamConfigMap.putAll(multiStreamTracker.streamConfigList().stream()
                    .collect(Collectors.toMap(sc -> sc.streamIdentifier(), sc -> sc)));

            List<MultiStreamLease> leases;

            final Map<StreamIdentifier, StreamConfig> newStreamConfigMap = streamTracker.streamConfigList()
                    .stream().collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity()));
            // This is done to ensure that we clean up the stale streams lingering in the lease table.
            if (!leasesSyncedOnAppInit && isMultiStreamMode) {
                leases = fetchMultiStreamLeases();
                syncStreamsFromLeaseTableOnAppInit(leases);
            if (isMultiStreamMode && (shouldSyncLeases || !leasesSyncedOnAppInit)) {
                // Skip updating the stream map due to no new stream since last sync
                if (newStreamConfigMap.keySet().stream().anyMatch(s -> !currentStreamConfigMap.containsKey(s))) {
                    syncStreamsFromLeaseTableOnAppInit(fetchMultiStreamLeases());
                }
                leasesSyncedOnAppInit = true;
                shouldSyncLeases = false;
            }

            // For new streams discovered, do a shard sync and update the currentStreamConfigMap
            for (StreamIdentifier streamIdentifier : newStreamConfigMap.keySet()) {
                if (!currentStreamConfigMap.containsKey(streamIdentifier)) {
                    log.info("Found new stream to process: " + streamIdentifier + ". Syncing shards of that stream.");
                    ShardSyncTaskManager shardSyncTaskManager = createOrGetShardSyncTaskManager(newStreamConfigMap.get(streamIdentifier));
                    final StreamConfig streamConfig = newStreamConfigMap.get(streamIdentifier);
                    log.info("Found new stream to process: {}. Syncing shards of that stream.", streamConfig);
                    ShardSyncTaskManager shardSyncTaskManager = createOrGetShardSyncTaskManager(streamConfig);
                    shardSyncTaskManager.submitShardSyncTask();
                    currentStreamConfigMap.put(streamIdentifier, newStreamConfigMap.get(streamIdentifier));
                    currentStreamConfigMap.put(streamIdentifier, streamConfig);
                    streamsSynced.add(streamIdentifier);
                } else {
                    if (log.isDebugEnabled()) {
                        log.debug(streamIdentifier + " is already being processed - skipping shard sync.");
                    }
                    log.debug("{} is already being processed - skipping shard sync.", streamIdentifier);
                }
            }

@@ -502,7 +490,7 @@
                }
            };

            if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == StreamsLeasesDeletionType.FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION) {
            if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION) {
                // Now, we are identifying the stale/old streams and enqueuing it for deferred deletion.
                // It is assumed that all the workers will always have the latest and consistent snapshot of streams
                // from the multiStreamTracker.

@@ -521,12 +509,10 @@
                // In order to give workers with stale stream info, sufficient time to learn about the new streams
                // before attempting to delete it, we will be deferring the leases deletion based on the
                // defer time period.

                currentStreamConfigMap.keySet().stream().forEach(streamIdentifier -> enqueueStreamLeaseDeletionOperation.accept(streamIdentifier));

                currentStreamConfigMap.keySet().forEach(enqueueStreamLeaseDeletionOperation);
            } else if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION) {
                Optional.ofNullable(formerStreamsLeasesDeletionStrategy.streamIdentifiersForLeaseCleanup()).ifPresent(
                        streamIdentifiers -> streamIdentifiers.stream().forEach(streamIdentifier -> enqueueStreamLeaseDeletionOperation.accept(streamIdentifier)));
                        streamIdentifiers -> streamIdentifiers.forEach(enqueueStreamLeaseDeletionOperation));
            } else {
                // Remove the old/stale streams identified through the new and existing streams list, without
                // cleaning up their leases. Disabling deprecated shard sync + lease cleanup through a flag.

@@ -536,7 +522,8 @@
                if (!newStreamConfigMap.containsKey(streamIdentifier)) {
                    if (SHOULD_DO_LEASE_SYNC_FOR_OLD_STREAMS) {
                        log.info(
                                "Found old/deleted stream : {}. Triggering shard sync. Removing from tracked active streams.", streamIdentifier);
                                "Found old/deleted stream : {}. Triggering shard sync. Removing from tracked active streams.",
                                streamIdentifier);
                        ShardSyncTaskManager shardSyncTaskManager = createOrGetShardSyncTaskManager(
                                currentStreamConfigMap.get(streamIdentifier));
                        shardSyncTaskManager.submitShardSyncTask();

@@ -551,13 +538,32 @@
                }
            }

            final Duration waitPeriodToDeleteOldStreams =
                    formerStreamsLeasesDeletionStrategy.waitPeriodToDeleteFormerStreams();
            // Now let's scan the streamIdentifiersForLeaseCleanup eligible for deferred deletion and delete them.
            // StreamIdentifiers are eligible for deletion only when the deferment period has elapsed and
            // the streamIdentifiersForLeaseCleanup are not present in the latest snapshot.
            final Map<Boolean, Set<StreamIdentifier>> staleStreamIdDeletionDecisionMap = staleStreamDeletionMap.keySet().stream().collect(Collectors
                    .partitioningBy(streamIdentifier -> newStreamConfigMap.containsKey(streamIdentifier), Collectors.toSet()));
            final Set<StreamIdentifier> staleStreamIdsToBeDeleted = staleStreamIdDeletionDecisionMap.get(false).stream().filter(streamIdentifier ->
                    Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()).toMillis() >= waitPeriodToDeleteOldStreams.toMillis()).collect(Collectors.toSet());
            final Map<Boolean, Set<StreamIdentifier>> staleStreamIdDeletionDecisionMap =
                    staleStreamDeletionMap.keySet().stream().collect(
                            Collectors.partitioningBy(newStreamConfigMap::containsKey, Collectors.toSet()));
            final Set<StreamIdentifier> staleStreamIdsToBeDeleted = staleStreamIdDeletionDecisionMap.get(false)
                    .stream().filter(streamIdentifier ->
                            Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now())
                                    .toMillis() >= waitPeriodToDeleteOldStreams.toMillis())
                    .collect(Collectors.toSet());
            // These are the streams which are deleted in Kinesis and we encounter resource not found during
            // shardSyncTask. This is applicable in MultiStreamMode only, in case of SingleStreamMode, store will
            // not have any data.
            // Filter streams based on newStreamConfigMap so that we don't override input to KCL in any case.
            final Set<StreamIdentifier> deletedStreamSet = this.deletedStreamListProvider
                    .purgeAllDeletedStream()
                    .stream()
                    .filter(streamIdentifier -> !newStreamConfigMap.containsKey(streamIdentifier))
                    .collect(Collectors.toSet());
            if (deletedStreamSet.size() > 0) {
                log.info("Stale streams to delete: {}", deletedStreamSet);
                staleStreamIdsToBeDeleted.addAll(deletedStreamSet);
            }
            final Set<StreamIdentifier> deletedStreamsLeases = deleteMultiStreamLeases(staleStreamIdsToBeDeleted);
            streamsSynced.addAll(deletedStreamsLeases);
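The deferred-deletion decision above has two gates: the stale stream must be absent from the tracker's latest snapshot, and its deferment period must have elapsed. A simplified standalone sketch with hypothetical stream names and timestamps:

import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class StaleStreamSketch {
    public static void main(String[] args) {
        final Duration waitPeriod = Duration.ofMinutes(10);
        // stream-a is still tracked; stream-b went stale an hour ago; stream-c just went stale.
        final Set<String> latestSnapshot = new HashSet<>(Arrays.asList("stream-a"));
        final Map<String, Instant> staleStreamDeletionMap = new HashMap<>();
        staleStreamDeletionMap.put("stream-a", Instant.now().minus(Duration.ofHours(1)));
        staleStreamDeletionMap.put("stream-b", Instant.now().minus(Duration.ofHours(1)));
        staleStreamDeletionMap.put("stream-c", Instant.now());

        final Set<String> toDelete = staleStreamDeletionMap.keySet().stream()
                .filter(s -> !latestSnapshot.contains(s))            // still absent from the tracker
                .filter(s -> Duration.between(staleStreamDeletionMap.get(s), Instant.now())
                        .compareTo(waitPeriod) >= 0)                 // deferment period elapsed
                .collect(Collectors.toSet());
        System.out.println(toDelete); // [stream-b]
    }
}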
@@ -577,6 +583,8 @@
            MetricsUtil.addCount(metricsScope, ACTIVE_STREAMS_COUNT, newStreamConfigMap.size(), MetricsLevel.SUMMARY);
            MetricsUtil.addCount(metricsScope, PENDING_STREAMS_DELETION_COUNT, staleStreamDeletionMap.size(),
                    MetricsLevel.SUMMARY);
            MetricsUtil.addCount(metricsScope, NON_EXISTING_STREAM_DELETE_COUNT, deletedStreamSet.size(),
                    MetricsLevel.SUMMARY);
            MetricsUtil.addCount(metricsScope, DELETED_STREAMS_COUNT, deletedStreamsLeases.size(), MetricsLevel.SUMMARY);
        } finally {
            MetricsUtil.endScope(metricsScope);
@@ -585,20 +593,22 @@
        return streamsSynced;
    }

    @VisibleForTesting boolean shouldSyncStreamsNow() {
    @VisibleForTesting
    boolean shouldSyncStreamsNow() {
        return isMultiStreamMode &&
                (streamSyncWatch.elapsed(TimeUnit.MILLISECONDS) > NEW_STREAM_CHECK_INTERVAL_MILLIS);
    }

    @VisibleForTesting void syncStreamsFromLeaseTableOnAppInit(List<MultiStreamLease> leases) {
        final Set<StreamIdentifier> streamIdentifiers = leases.stream()
    @VisibleForTesting
    void syncStreamsFromLeaseTableOnAppInit(List<MultiStreamLease> leases) {
        leases.stream()
                .map(lease -> StreamIdentifier.multiStreamInstance(lease.streamIdentifier()))
                .collect(Collectors.toSet());
        for (StreamIdentifier streamIdentifier : streamIdentifiers) {
            if (!currentStreamConfigMap.containsKey(streamIdentifier)) {
                currentStreamConfigMap.put(streamIdentifier, getOrphanedStreamConfig(streamIdentifier));
            }
        }
                .filter(streamIdentifier -> !currentStreamConfigMap.containsKey(streamIdentifier))
                .forEach(streamIdentifier -> {
                    final StreamConfig streamConfig = streamTracker.createStreamConfig(streamIdentifier);
                    currentStreamConfigMap.put(streamIdentifier, streamConfig);
                    log.info("Cached {}", streamConfig);
                });
    }

    private List<MultiStreamLease> fetchMultiStreamLeases()
@@ -607,35 +617,34 @@
    }

    private void removeStreamsFromStaleStreamsList(Set<StreamIdentifier> streamIdentifiers) {
        for(StreamIdentifier streamIdentifier : streamIdentifiers) {
        for (StreamIdentifier streamIdentifier : streamIdentifiers) {
            staleStreamDeletionMap.remove(streamIdentifier);
        }
    }

    private Set<StreamIdentifier> deleteMultiStreamLeases(Set<StreamIdentifier> streamIdentifiers)
            throws DependencyException, ProvisionedThroughputException, InvalidStateException {
        if (streamIdentifiers.isEmpty()) {
            return Collections.emptySet();
        }
        log.info("Deleting streams: {}", streamIdentifiers);
        final Set<StreamIdentifier> streamsSynced = new HashSet<>();
        List<MultiStreamLease> leases = null;
        Map<String, List<MultiStreamLease>> streamIdToShardsMap = null;
        for(StreamIdentifier streamIdentifier : streamIdentifiers) {
            if (leases == null) {
                // Lazy Load once and use many times for this iteration.
                leases = fetchMultiStreamLeases();
            }
            if (streamIdToShardsMap == null) {
                // Lazy load once and use many times for this iteration.
                streamIdToShardsMap = leases.stream().collect(Collectors
                        .groupingBy(MultiStreamLease::streamIdentifier,
                                Collectors.toCollection(ArrayList::new)));
            }
            log.warn("Found old/deleted stream: " + streamIdentifier + ". Directly deleting leases of this stream.");
        final List<MultiStreamLease> leases = fetchMultiStreamLeases();
        final Map<String, List<MultiStreamLease>> streamIdToShardsMap = leases.stream().collect(
                Collectors.groupingBy(MultiStreamLease::streamIdentifier, Collectors.toCollection(ArrayList::new)));
        for (StreamIdentifier streamIdentifier : streamIdentifiers) {
            // Deleting leases will cause the workers to shutdown the record processors for these shards.
            if (deleteMultiStreamLeases(streamIdToShardsMap.get(streamIdentifier.serialize()))) {
                log.warn("Found old/deleted stream: {}. Directly deleting leases of this stream.", streamIdentifier);
                currentStreamConfigMap.remove(streamIdentifier);
                staleStreamDeletionMap.remove(streamIdentifier);
                streamsSynced.add(streamIdentifier);
            }
        }
        if (!streamsSynced.isEmpty()) {
            // map keys are StreamIdentifiers, which are members of StreamConfig, and therefore redundant
            log.info("Streams retained post-deletion: {}", currentStreamConfigMap.values());
        }
        return streamsSynced;
    }
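deleteMultiStreamLeases() now fetches leases once and buckets them by serialized stream identifier before iterating. A standalone sketch of that grouping step, with hypothetical lease keys of the form accountId:streamName:epoch:shardId:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupLeasesSketch {
    public static void main(String[] args) {
        // Hypothetical multi-stream lease keys; the stream identifier is everything
        // before the final ':'.
        final List<String> leaseKeys = Arrays.asList(
                "123:orders:1:shardId-000",
                "123:orders:1:shardId-001",
                "123:billing:7:shardId-000");

        final Map<String, List<String>> streamToLeases = leaseKeys.stream()
                .collect(Collectors.groupingBy(k -> k.substring(0, k.lastIndexOf(':'))));
        // Two buckets: 123:orders:1 (two leases) and 123:billing:7 (one lease);
        // map iteration order is unspecified.
        System.out.println(streamToLeases);
    }
}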
@@ -655,11 +664,6 @@
        return true;
    }

    // Generate default StreamConfig for an "orphaned" stream that is in the lease table but not tracked
    private StreamConfig getOrphanedStreamConfig(StreamIdentifier streamIdentifier) {
        return new StreamConfig(streamIdentifier, orphanedStreamInitialPositionInStream);
    }

    /**
     * Returns whether worker can shutdown immediately. Note that this method is called from Worker's {{@link #run()}
     * method before every loop run, so method must do minimum amount of work to not impact shard processing timings.

@@ -919,7 +923,11 @@
        // Irrespective of single stream app or multi stream app, streamConfig should always be available.
        // If we have a shardInfo, that is not present in currentStreamConfigMap for whatever reason, then return default stream config
        // to gracefully complete the reading.
        final StreamConfig streamConfig = currentStreamConfigMap.getOrDefault(streamIdentifier, getOrphanedStreamConfig(streamIdentifier));
        StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier);
        if (streamConfig == null) {
            streamConfig = streamTracker.createStreamConfig(streamIdentifier);
            log.info("Created orphan {}", streamConfig);
        }
        Validate.notNull(streamConfig, "StreamConfig should not be null");
        RecordsPublisher cache = retrievalConfig.retrievalFactory().createGetRecordsCache(shardInfo, streamConfig, metricsFactory);
        ShardConsumerArgument argument = new ShardConsumerArgument(shardInfo,

@@ -1016,7 +1024,7 @@
    @NoArgsConstructor(access = AccessLevel.PRIVATE)
    private static class SchedulerLog {

        private long reportIntervalMillis = TimeUnit.MINUTES.toMillis(1);
        private final long reportIntervalMillis = TimeUnit.MINUTES.toMillis(1);
        private long nextReportTime = System.currentTimeMillis() + reportIntervalMillis;
        private boolean infoReporting;
@@ -19,16 +19,16 @@ package software.amazon.kinesis.coordinator;
 */
@FunctionalInterface
public interface WorkerStateChangeListener {
        enum WorkerState {
                CREATED,
                INITIALIZING,
                STARTED,
                SHUT_DOWN_STARTED,
                SHUT_DOWN
        }
    enum WorkerState {
        CREATED,
        INITIALIZING,
        STARTED,
        SHUT_DOWN_STARTED,
        SHUT_DOWN
    }

        void onWorkerStateChange(WorkerState newState);
    void onWorkerStateChange(WorkerState newState);

        default void onAllInitializationAttemptsFailed(Throwable e) {
        }
    default void onAllInitializationAttemptsFailed(Throwable e) {
    }
}
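Because WorkerStateChangeListener is a @FunctionalInterface with onAllInitializationAttemptsFailed defaulted, callers can supply a listener as a lambda. A standalone sketch (the interface is re-declared locally so the example compiles on its own):

public class ListenerSketch {
    @FunctionalInterface
    interface WorkerStateChangeListener {
        enum WorkerState { CREATED, INITIALIZING, STARTED, SHUT_DOWN_STARTED, SHUT_DOWN }

        void onWorkerStateChange(WorkerState newState);

        // Defaulted, so a lambda only needs to implement onWorkerStateChange.
        default void onAllInitializationAttemptsFailed(Throwable e) {
        }
    }

    public static void main(String[] args) {
        final WorkerStateChangeListener listener =
                newState -> System.out.println("Worker state: " + newState);
        listener.onWorkerStateChange(WorkerStateChangeListener.WorkerState.STARTED);
    }
}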
@@ -17,6 +17,7 @@ package software.amazon.kinesis.leases;
import java.io.Serializable;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;

@@ -39,6 +40,7 @@ import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.services.kinesis.model.ChildShard;
import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.services.kinesis.model.Shard;
import software.amazon.awssdk.services.kinesis.model.ShardFilter;
import software.amazon.awssdk.services.kinesis.model.ShardFilterType;

@@ -47,6 +49,7 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamIdentifier;
import software.amazon.kinesis.coordinator.DeletedStreamListProvider;
import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.leases.exceptions.InvalidStateException;

@@ -56,13 +59,14 @@ import software.amazon.kinesis.metrics.MetricsScope;
import software.amazon.kinesis.metrics.MetricsUtil;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

import static java.util.Objects.nonNull;
import static software.amazon.kinesis.common.HashKeyRangeForLease.fromHashKeyRange;

/**
 * Helper class to sync leases with shards of the Kinesis stream.
 * It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding).
 * It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it
 * and begun processing it's child shards.
 * and begun processing its child shards.
 */
@Slf4j
@KinesisClientInternalApi
@@ -72,23 +76,30 @@ public class HierarchicalShardSyncer {

    private final String streamIdentifier;

    private final DeletedStreamListProvider deletedStreamListProvider;

    private static final String MIN_HASH_KEY = BigInteger.ZERO.toString();
    private static final String MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE).toString();
    private static final int retriesForCompleteHashRange = 3;
    private static final int RETRIES_FOR_COMPLETE_HASH_RANGE = 3;

    private static final long DELAY_BETWEEN_LIST_SHARDS_MILLIS = 1000;

    public HierarchicalShardSyncer() {
        isMultiStreamMode = false;
        streamIdentifier = "SingleStreamMode";
        this(false, "SingleStreamMode");
    }

    public HierarchicalShardSyncer(final boolean isMultiStreamMode, final String streamIdentifier) {
        this.isMultiStreamMode = isMultiStreamMode;
        this.streamIdentifier = streamIdentifier;
        this(isMultiStreamMode, streamIdentifier, null);
    }

    private static final BiFunction<Lease, MultiStreamArgs, String> shardIdFromLeaseDeducer =
    public HierarchicalShardSyncer(final boolean isMultiStreamMode, final String streamIdentifier,
            final DeletedStreamListProvider deletedStreamListProvider) {
        this.isMultiStreamMode = isMultiStreamMode;
        this.streamIdentifier = streamIdentifier;
        this.deletedStreamListProvider = deletedStreamListProvider;
    }

    private static final BiFunction<Lease, MultiStreamArgs, String> SHARD_ID_FROM_LEASE_DEDUCER =
            (lease, multiStreamArgs) ->
                    multiStreamArgs.isMultiStreamMode() ?
                            ((MultiStreamLease) lease).shardId() :
@@ -109,7 +120,6 @@
     * @throws ProvisionedThroughputException
     * @throws KinesisClientLibIOException
     */
    // CHECKSTYLE:OFF CyclomaticComplexity
    public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector,
            final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition,
            final MetricsScope scope, final boolean ignoreUnexpectedChildShards, final boolean isLeaseTableEmpty)

@@ -120,14 +130,13 @@
                isLeaseTableEmpty);
    }

    //Provide a pre-collcted list of shards to avoid calling ListShards API
    /**
     * Provide a pre-collected list of shards to avoid calling ListShards API
     */
    public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector,
            final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition,
            List<Shard> latestShards, final boolean ignoreUnexpectedChildShards, final MetricsScope scope, final boolean isLeaseTableEmpty)
            throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException {

        //TODO: Need to add multistream support for this https://sim.amazon.com/issues/KinesisLTR-191

        if (!CollectionUtils.isNullOrEmpty(latestShards)) {
            log.debug("{} - Num shards: {}", streamIdentifier, latestShards.size());
        } else {

@@ -149,19 +158,27 @@
                new NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
        final List<Lease> newLeasesToCreate = determineNewLeasesToCreate(leaseSynchronizer, latestShards, currentLeases,
                initialPosition, inconsistentShardIds, multiStreamArgs);
        log.debug("{} - Num new leases to create: {}", streamIdentifier, newLeasesToCreate.size());
        log.info("{} - Number of new leases to create: {}", streamIdentifier, newLeasesToCreate.size());

        final Set<Lease> createdLeases = new HashSet<>();

        for (Lease lease : newLeasesToCreate) {
            long startTime = System.currentTimeMillis();
            final long startTime = System.currentTimeMillis();
            boolean success = false;
            try {
                leaseRefresher.createLeaseIfNotExists(lease);
                if (leaseRefresher.createLeaseIfNotExists(lease)) {
                    createdLeases.add(lease);
                }
                success = true;
            } finally {
                MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED);
                if (lease.checkpoint() != null) {
                    final String metricName = lease.checkpoint().isSentinelCheckpoint() ? lease.checkpoint().sequenceNumber() : "SEQUENCE_NUMBER";
                    MetricsUtil.addSuccess(scope, "CreateLease_" + metricName, true, MetricsLevel.DETAILED);
                }
            }
        }
        final List<Lease> trackedLeases = new ArrayList<>(currentLeases);
        trackedLeases.addAll(newLeasesToCreate);
        log.info("{} - Newly created leases {}: {}", streamIdentifier, createdLeases.size(), createdLeases);
        return true;
    }
@@ -175,7 +192,9 @@
        if (!CollectionUtils.isNullOrEmpty(inconsistentShardIds)) {
            final String ids = StringUtils.join(inconsistentShardIds, ' ');
            throw new KinesisClientLibIOException(String.format(
                    // CHECKSTYLE.OFF: LineLength
                    "%d open child shards (%s) are inconsistent. This can happen due to a race condition between describeStream and a reshard operation.",
                    // CHECKSTYLE.ON: LineLength
                    inconsistentShardIds.size(), ids));
        }
    }

@@ -195,38 +214,6 @@
                .flatMap(entry -> shardIdToChildShardIdsMap.get(entry.getKey()).stream()).collect(Collectors.toSet());
    }

    private synchronized void assertHashRangeOfClosedShardIsCovered(final Shard closedShard,
            final Map<String, Shard> shardIdToShardMap, final Set<String> childShardIds)
            throws KinesisClientLibIOException {
        BigInteger minStartingHashKeyOfChildren = null;
        BigInteger maxEndingHashKeyOfChildren = null;

        final BigInteger startingHashKeyOfClosedShard = new BigInteger(closedShard.hashKeyRange().startingHashKey());
        final BigInteger endingHashKeyOfClosedShard = new BigInteger(closedShard.hashKeyRange().endingHashKey());

        for (String childShardId : childShardIds) {
            final Shard childShard = shardIdToShardMap.get(childShardId);
            final BigInteger startingHashKey = new BigInteger(childShard.hashKeyRange().startingHashKey());
            if (minStartingHashKeyOfChildren == null || startingHashKey.compareTo(minStartingHashKeyOfChildren) < 0) {
                minStartingHashKeyOfChildren = startingHashKey;
            }

            final BigInteger endingHashKey = new BigInteger(childShard.hashKeyRange().endingHashKey());
            if (maxEndingHashKeyOfChildren == null || endingHashKey.compareTo(maxEndingHashKeyOfChildren) > 0) {
                maxEndingHashKeyOfChildren = endingHashKey;
            }
        }

        if (minStartingHashKeyOfChildren == null || maxEndingHashKeyOfChildren == null
                || minStartingHashKeyOfChildren.compareTo(startingHashKeyOfClosedShard) > 0
                || maxEndingHashKeyOfChildren.compareTo(endingHashKeyOfClosedShard) < 0) {
            throw new KinesisClientLibIOException(String.format(
                    "Incomplete shard list: hash key range of shard %s is not covered by its child shards.",
                    closedShard.shardId()));
        }

    }

    /**
     * Helper method to construct shardId->setOfChildShardIds map.
     * Note: This has package access for testing purposes only.
@ -262,7 +249,6 @@ public class HierarchicalShardSyncer {
|
|||
* @return ShardFilter shard filter for the corresponding position in the stream.
|
||||
*/
|
||||
private static ShardFilter getShardFilterFromInitialPosition(InitialPositionInStreamExtended initialPositionInStreamExtended) {
|
||||
|
||||
ShardFilter.Builder builder = ShardFilter.builder();
|
||||
|
||||
switch (initialPositionInStreamExtended.getInitialPositionInStream()) {
|
||||
|
|
@ -287,7 +273,7 @@ public class HierarchicalShardSyncer {
|
|||
|
||||
List<Shard> shards;
|
||||
|
||||
for (int i = 0; i < retriesForCompleteHashRange; i++) {
|
||||
for (int i = 0; i < RETRIES_FOR_COMPLETE_HASH_RANGE; i++) {
|
||||
shards = shardDetector.listShardsWithFilter(shardFilter);
|
||||
|
||||
if (shards == null) {
|
||||
|
|
@ -303,18 +289,26 @@ public class HierarchicalShardSyncer {
|
|||
}
|
||||
|
||||
throw new KinesisClientLibIOException("Hash range of shards returned for " + streamName + " was incomplete after "
|
||||
+ retriesForCompleteHashRange + " retries.");
|
||||
+ RETRIES_FOR_COMPLETE_HASH_RANGE + " retries.");
|
||||
}
|
||||
|
||||
private static List<Shard> getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException {
|
||||
final Optional<List<Shard>> shards = Optional.of(shardDetector.listShards());
|
||||
private List<Shard> getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException {
|
||||
// Fallback to existing behavior for backward compatibility
|
||||
List<Shard> shardList = Collections.emptyList();
|
||||
try {
|
||||
shardList = shardDetector.listShardsWithoutConsumingResourceNotFoundException();
|
||||
} catch (ResourceNotFoundException e) {
|
||||
if (nonNull(this.deletedStreamListProvider) && isMultiStreamMode) {
|
||||
deletedStreamListProvider.add(StreamIdentifier.multiStreamInstance(streamIdentifier));
|
||||
}
|
||||
}
|
||||
final Optional<List<Shard>> shards = Optional.of(shardList);
|
||||
|
||||
return shards.orElseThrow(() -> new KinesisClientLibIOException("Stream " + shardDetector.streamIdentifier().streamName() +
|
||||
" is not in ACTIVE OR UPDATING state - will retry getting the shard list."));
|
||||
}
|
||||
|
||||
private static boolean isHashRangeOfShardsComplete(@NonNull List<Shard> shards) {
|
||||
|
||||
if (shards.isEmpty()) {
|
||||
throw new IllegalStateException("No shards found when attempting to validate complete hash range.");
|
||||
}
|
||||
|
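Reviewer note: the new getShardList contract is easiest to see in isolation - list through the variant that propagates ResourceNotFoundException, record the deletion when running in multi-stream mode, then fall back to the empty list the existing callers already handle. A minimal sketch of that pattern follows; the ShardLister, StreamDeletedException, and DeletedStreams types are hypothetical stand-ins for ShardDetector, the SDK exception, and DeletedStreamListProvider.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical stand-ins for ShardDetector and DeletedStreamListProvider.
interface ShardLister {
    List<String> listShardsOrThrow() throws StreamDeletedException;
}

class StreamDeletedException extends Exception {}

class DeletedStreams {
    private final List<String> deleted = new ArrayList<>();
    void add(String streamName) { deleted.add(streamName); }
}

class ShardListFallback {
    /** Returns the shard list, or an empty list if the stream no longer exists. */
    static List<String> shardsOrEmpty(ShardLister lister, DeletedStreams deletedStreams,
            boolean multiStreamMode, String streamName) {
        try {
            return lister.listShardsOrThrow();
        } catch (StreamDeletedException e) {
            // Only multi-stream applications track deletions; single-stream mode
            // keeps the old behavior of acting on an empty shard list.
            if (multiStreamMode) {
                deletedStreams.add(streamName);
            }
            return Collections.emptyList();
        }
    }
}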
@@ -376,7 +370,8 @@ public class HierarchicalShardSyncer {
* @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard
*/
static List<Lease> determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List<Shard> shards,
- final List<Lease> currentLeases, final InitialPositionInStreamExtended initialPosition,final Set<String> inconsistentShardIds) {
+ final List<Lease> currentLeases, final InitialPositionInStreamExtended initialPosition,
+ final Set<String> inconsistentShardIds) {
return determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds,
new MultiStreamArgs(false, null));
}

@@ -413,7 +408,6 @@ public class HierarchicalShardSyncer {
* @param memoizationContext Memoization of shards that have been evaluated as part of the evaluation
* @return true if the shard is a descendant of any current shard (lease already exists)
*/
- // CHECKSTYLE:OFF CyclomaticComplexity
static boolean checkIfDescendantAndAddNewLeasesForAncestors(final String shardId,
final InitialPositionInStreamExtended initialPosition, final Set<String> shardIdsOfCurrentLeases,
final Map<String, Shard> shardIdToShardMapOfAllKinesisShards,

@@ -434,6 +428,7 @@ public class HierarchicalShardSyncer {
isDescendant = true;
// We don't need to add leases of its ancestors,
// because we'd have done it when creating a lease for this shard.
+ log.debug("{} - Shard {} is a descendant shard of an existing shard. Skipping lease creation", streamIdentifier, shardId);
} else {

final Shard shard = shardIdToShardMapOfAllKinesisShards.get(shardId);

@@ -460,7 +455,7 @@ public class HierarchicalShardSyncer {
if (!shardIdsOfCurrentLeases.contains(parentShardId)) {
Lease lease = shardIdToLeaseMapOfNewShards.get(parentShardId);

- /**
+ /*
* If the lease for the parent shard does not already exist, there are two cases in which we
* would want to create it:
* - If we have already marked the parentShardId for lease creation in a prior recursive

@@ -482,7 +477,7 @@ public class HierarchicalShardSyncer {
}
}

- /**
+ /*
* If the shard is a descendant and the specified initial position is AT_TIMESTAMP, then the
* checkpoint should be set to AT_TIMESTAMP, else to TRIM_HORIZON. For AT_TIMESTAMP, we will
* add a lease just like we do for TRIM_HORIZON. However we will only return back records

@@ -510,9 +505,14 @@ public class HierarchicalShardSyncer {
if (descendantParentShardIds.contains(parentShardId)
&& !initialPosition.getInitialPositionInStream()
.equals(InitialPositionInStream.AT_TIMESTAMP)) {
+ log.info("Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}",
+ lease.leaseKey(), lease.checkpoint());
lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
} else {
- lease.checkpoint(convertToCheckpoint(initialPosition));
+ final ExtendedSequenceNumber newCheckpoint = convertToCheckpoint(initialPosition);
+ log.info("Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}",
+ lease.leaseKey(), newCheckpoint, lease.checkpoint());
+ lease.checkpoint(newCheckpoint);
}
}
}

@@ -544,13 +544,11 @@ public class HierarchicalShardSyncer {
new MultiStreamArgs(false, null));
}

- // CHECKSTYLE:ON CyclomaticComplexity
-
/**
* Helper method to get parent shardIds of the current shard - includes the parent shardIds if:
* a/ they are not null
* b/ if they exist in the current shard map (i.e. haven't expired)
*
* @param shard Will return parents of this shard
* @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream.
* @return Set of parentShardIds

@@ -569,20 +567,28 @@ public class HierarchicalShardSyncer {
return parentShardIds;
}

- public synchronized Lease createLeaseForChildShard(final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException {
+ public synchronized Lease createLeaseForChildShard(final ChildShard childShard,
+ final StreamIdentifier streamIdentifier) throws InvalidStateException {
final MultiStreamArgs multiStreamArgs = new MultiStreamArgs(isMultiStreamMode, streamIdentifier);

return multiStreamArgs.isMultiStreamMode() ? newKCLMultiStreamLeaseForChildShard(childShard, streamIdentifier)
: newKCLLeaseForChildShard(childShard);
}

+ /**
+ * Generate a lease object for the given Child Shard. Checkpoint is set to TRIM_HORIZON
+ * @param childShard Shard for which a lease should be created
+ * @return Lease for the shard
+ * @throws InvalidStateException If the child shard has no parent shards
+ */
private static Lease newKCLLeaseForChildShard(final ChildShard childShard) throws InvalidStateException {
Lease newLease = new Lease();
newLease.leaseKey(childShard.shardId());
if (!CollectionUtils.isNullOrEmpty(childShard.parentShards())) {
newLease.parentShardIds(childShard.parentShards());
} else {
- throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId() + "because parent shards cannot be found.");
+ throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId()
+ + " because parent shards cannot be found.");
}
newLease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
newLease.ownerSwitchesSinceCheckpoint(0L);

@@ -590,13 +596,15 @@ public class HierarchicalShardSyncer {
return newLease;
}

- private static Lease newKCLMultiStreamLeaseForChildShard(final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException {
+ private static Lease newKCLMultiStreamLeaseForChildShard(final ChildShard childShard,
+ final StreamIdentifier streamIdentifier) throws InvalidStateException {
MultiStreamLease newLease = new MultiStreamLease();
newLease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), childShard.shardId()));
if (!CollectionUtils.isNullOrEmpty(childShard.parentShards())) {
newLease.parentShardIds(childShard.parentShards());
} else {
- throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId() + "because parent shards cannot be found.");
+ throw new InvalidStateException("Unable to populate new lease for child shard " + childShard.shardId()
+ + " because parent shards cannot be found.");
}
newLease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON);
newLease.ownerSwitchesSinceCheckpoint(0L);
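Reviewer note: the only difference from the single-stream path above is the lease key, which prefixes the serialized stream identifier to the shard id. A hedged sketch of that composition - the ':' delimiter and the "account:stream:creationEpoch" serialization are assumptions about MultiStreamLease.getLeaseKey and StreamIdentifier.serialize, not something this diff confirms.

// A minimal sketch of multi-stream lease key composition; the delimiter and
// the serialized StreamIdentifier format are assumptions.
final class MultiStreamLeaseKeySketch {
    static String leaseKey(String serializedStreamIdentifier, String shardId) {
        return serializedStreamIdentifier + ":" + shardId;
    }

    public static void main(String[] args) {
        // e.g. "123456789012:my-stream:1591000000:shardId-000000000000"
        System.out.println(leaseKey("123456789012:my-stream:1591000000", "shardId-000000000000"));
    }
}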
@@ -609,9 +617,8 @@ public class HierarchicalShardSyncer {
/**
* Helper method to create a new Lease POJO for a shard.
- * Note: Package level access only for testing purposes
*
* @param shard
* @return
*/
private static Lease newKCLLease(final Shard shard) {
Lease newLease = new Lease();

@@ -649,7 +656,7 @@ public class HierarchicalShardSyncer {

/**
* Helper method to construct a shardId->Shard map for the specified list of shards.
*
* @param shards List of shards
* @return ShardId->Shard map
*/

@@ -660,7 +667,7 @@ public class HierarchicalShardSyncer {
/**
* Helper method to return all the open shards for a stream.
* Note: Package level access only for testing purposes.
*
* @param allShards All shards returved via DescribeStream. We assume this to represent a consistent shard list.
* @return List of open shards (shards at the tip of the stream) - may include shards that are not yet active.
*/

@@ -671,7 +678,7 @@ public class HierarchicalShardSyncer {

private static ExtendedSequenceNumber convertToCheckpoint(final InitialPositionInStreamExtended position) {
ExtendedSequenceNumber checkpoint = null;

if (position.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON)) {
checkpoint = ExtendedSequenceNumber.TRIM_HORIZON;
} else if (position.getInitialPositionInStream().equals(InitialPositionInStream.LATEST)) {

@@ -679,7 +686,7 @@ public class HierarchicalShardSyncer {
} else if (position.getInitialPositionInStream().equals(InitialPositionInStream.AT_TIMESTAMP)) {
checkpoint = ExtendedSequenceNumber.AT_TIMESTAMP;
}

return checkpoint;
}

@@ -726,31 +733,30 @@ public class HierarchicalShardSyncer {
* We assume that lease1 and lease2 are:
* a/ not null,
* b/ shards (if found) have non-null starting sequence numbers
*
* {@inheritDoc}
*/
@Override
public int compare(final Lease lease1, final Lease lease2) {
int result = 0;
- final String shardId1 = shardIdFromLeaseDeducer.apply(lease1, multiStreamArgs);
- final String shardId2 = shardIdFromLeaseDeducer.apply(lease2, multiStreamArgs);
+ final String shardId1 = SHARD_ID_FROM_LEASE_DEDUCER.apply(lease1, multiStreamArgs);
+ final String shardId2 = SHARD_ID_FROM_LEASE_DEDUCER.apply(lease2, multiStreamArgs);
final Shard shard1 = shardIdToShardMap.get(shardId1);
final Shard shard2 = shardIdToShardMap.get(shardId2);

// If we found shards for the two leases, use comparison of the starting sequence numbers
if (shard1 != null && shard2 != null) {
BigInteger sequenceNumber1 = new BigInteger(shard1.sequenceNumberRange().startingSequenceNumber());
BigInteger sequenceNumber2 = new BigInteger(shard2.sequenceNumberRange().startingSequenceNumber());
result = sequenceNumber1.compareTo(sequenceNumber2);
}

if (result == 0) {
result = shardId1.compareTo(shardId2);
}

return result;
}
}

@Data

@@ -807,7 +813,7 @@ public class HierarchicalShardSyncer {
final Map<String, Shard> shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards);

currentLeases.stream().peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease))
- .map(lease -> shardIdFromLeaseDeducer.apply(lease, multiStreamArgs))
+ .map(lease -> SHARD_ID_FROM_LEASE_DEDUCER.apply(lease, multiStreamArgs))
.collect(Collectors.toSet());

final List<Lease> newLeasesToCreate = getLeasesToCreateForOpenAndClosedShards(initialPosition, shards, multiStreamArgs, streamIdentifier);

@@ -839,11 +845,10 @@ public class HierarchicalShardSyncer {
shardIdToNewLeaseMap.put(shardId, lease);
}

- return new ArrayList(shardIdToNewLeaseMap.values());
+ return new ArrayList<>(shardIdToNewLeaseMap.values());
}
}

/**
* Class to help create leases when the lease table is not initially empty.
*/

@@ -878,22 +883,20 @@ public class HierarchicalShardSyncer {
* * the parent shard has expired.
* <p>
* For example:
* <pre>
* Shard structure (each level depicts a stream segment):
* 0 1 2 3 4 5 - shards till epoch 102
* \ / \ / | |
* 6 7 4 5 - shards from epoch 103 - 205
* \ / | / \
* 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber)
- *
- * Current leases: (4, 5, 7)
- *
- * If initial position is LATEST:
- * - New leases to create: (6)
- * If initial position is TRIM_HORIZON:
- * - New leases to create: (0, 1)
- * If initial position is AT_TIMESTAMP(epoch=200):
- * - New leases to create: (0, 1)
- *
* </pre>
+ * Assuming current leases are (4, 5, 7), new leases to create for an initial position are:
+ * <ul>
+ * <li>LATEST: (6)</li>
+ * <li>TRIM_HORIZON: (0, 1)</li>
+ * <li>AT_TIMESTAMP(epoch=200): (0, 1)</li>
+ * </ul>
+ * <p>
* The leases returned are sorted by the starting sequence number - following the same order
* when persisting the leases in DynamoDB will ensure that we recover gracefully if we fail
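Reviewer note: the three positions named in the reworked javadoc map one-to-one onto the InitialPositionInStreamExtended factory methods. A short sketch of requesting each starting point (the timestamp value here is arbitrary, echoing the epoch=200 example above):

import java.util.Date;

import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;

// A minimal sketch of the three starting positions discussed above.
class InitialPositions {
    public static void main(String[] args) {
        // Start at the tip of the stream: only open shards (6 in the example) need leases.
        InitialPositionInStreamExtended latest =
                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST);

        // Start from the oldest available records: the root shards (0, 1) need leases.
        InitialPositionInStreamExtended trimHorizon =
                InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON);

        // Start at a point in time; lease creation mirrors TRIM_HORIZON, but records
        // before the timestamp are filtered out downstream.
        InitialPositionInStreamExtended atTimestamp =
                InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(200L));

        System.out.println(latest + " / " + trimHorizon + " / " + atTimestamp);
    }
}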
@@ -916,7 +919,7 @@ public class HierarchicalShardSyncer {
.map(streamId -> streamId.serialize()).orElse("");
final Set<String> shardIdsOfCurrentLeases = currentLeases.stream()
.peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease))
- .map(lease -> shardIdFromLeaseDeducer.apply(lease, multiStreamArgs))
+ .map(lease -> SHARD_ID_FROM_LEASE_DEDUCER.apply(lease, multiStreamArgs))
.collect(Collectors.toSet());

final List<Shard> openShards = getOpenShards(shards, streamIdentifier);

@@ -973,8 +976,8 @@ public class HierarchicalShardSyncer {
*/
@NoArgsConstructor
static class MemoizationContext {
- private Map<String, Boolean> isDescendantMap = new HashMap<>();
- private Map<String, Boolean> shouldCreateLeaseMap = new HashMap<>();
+ private final Map<String, Boolean> isDescendantMap = new HashMap<>();
+ private final Map<String, Boolean> shouldCreateLeaseMap = new HashMap<>();

Boolean isDescendant(String shardId) {
return isDescendantMap.get(shardId);

@@ -64,6 +64,22 @@ import software.amazon.kinesis.retrieval.AWSExceptionManager;
@KinesisClientInternalApi
public class KinesisShardDetector implements ShardDetector {

+ /**
+ * Reusable {@link AWSExceptionManager}.
+ * <p>
+ * N.B. This instance is mutable, but thread-safe for <b>read-only</b> use.
+ * </p>
+ */
+ private static final AWSExceptionManager AWS_EXCEPTION_MANAGER;
+
+ static {
+ AWS_EXCEPTION_MANAGER = new AWSExceptionManager();
+ AWS_EXCEPTION_MANAGER.add(KinesisException.class, t -> t);
+ AWS_EXCEPTION_MANAGER.add(LimitExceededException.class, t -> t);
+ AWS_EXCEPTION_MANAGER.add(ResourceInUseException.class, t -> t);
+ AWS_EXCEPTION_MANAGER.add(ResourceNotFoundException.class, t -> t);
+ }
+
@NonNull
private final KinesisAsyncClient kinesisClient;
@NonNull @Getter
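Reviewer note: hoisting the manager to a static field takes the per-call construction (removed further down in listShards) off the hot path; after class initialization the instance is only ever read. A condensed, self-contained sketch of the reuse pattern - the listOrRethrow helper is illustrative, not part of the class:

import java.util.concurrent.ExecutionException;

import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.model.KinesisException;
import software.amazon.awssdk.services.kinesis.model.ListShardsRequest;
import software.amazon.awssdk.services.kinesis.model.ListShardsResponse;
import software.amazon.kinesis.retrieval.AWSExceptionManager;

class ExceptionManagerReuse {
    // Built once; all add(...) calls happen during class initialization,
    // so later use is read-only and safe across threads.
    private static final AWSExceptionManager MANAGER = new AWSExceptionManager();
    static {
        MANAGER.add(KinesisException.class, t -> t);
    }

    // Illustrative helper: unwrap the async ExecutionException into the typed service exception.
    static ListShardsResponse listOrRethrow(KinesisAsyncClient client, ListShardsRequest request)
            throws InterruptedException {
        try {
            return client.listShards(request).get();
        } catch (ExecutionException e) {
            throw MANAGER.apply(e.getCause());
        }
    }
}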
@@ -78,7 +94,9 @@ public class KinesisShardDetector implements ShardDetector {
private volatile Map<String, Shard> cachedShardMap = null;
private volatile Instant lastCacheUpdateTime;
@Getter(AccessLevel.PACKAGE)
- private AtomicInteger cacheMisses = new AtomicInteger(0);
+ private final AtomicInteger cacheMisses = new AtomicInteger(0);

+ private static final Boolean THROW_RESOURCE_NOT_FOUND_EXCEPTION = true;
+
@Deprecated
public KinesisShardDetector(KinesisAsyncClient kinesisClient, String streamName, long listShardsBackoffTimeInMillis,

@@ -159,15 +177,26 @@ public class KinesisShardDetector implements ShardDetector {
return listShardsWithFilter(null);
}

+ @Override
+ @Synchronized
+ public List<Shard> listShardsWithoutConsumingResourceNotFoundException() {
+ return listShardsWithFilterInternal(null, THROW_RESOURCE_NOT_FOUND_EXCEPTION);
+ }
+
@Override
@Synchronized
public List<Shard> listShardsWithFilter(ShardFilter shardFilter) {
+ return listShardsWithFilterInternal(shardFilter, !THROW_RESOURCE_NOT_FOUND_EXCEPTION);
+ }
+
+ private List<Shard> listShardsWithFilterInternal(ShardFilter shardFilter,
+ boolean shouldPropagateResourceNotFoundException) {
final List<Shard> shards = new ArrayList<>();
ListShardsResponse result;
String nextToken = null;

do {
- result = listShards(shardFilter, nextToken);
+ result = listShards(shardFilter, nextToken, shouldPropagateResourceNotFoundException);

if (result == null) {
/*

@@ -185,20 +214,19 @@ public class KinesisShardDetector implements ShardDetector {
return shards;
}

- private ListShardsResponse listShards(ShardFilter shardFilter, final String nextToken) {
- final AWSExceptionManager exceptionManager = new AWSExceptionManager();
- exceptionManager.add(ResourceNotFoundException.class, t -> t);
- exceptionManager.add(LimitExceededException.class, t -> t);
- exceptionManager.add(ResourceInUseException.class, t -> t);
- exceptionManager.add(KinesisException.class, t -> t);
-
+ /**
+ * @param shouldPropagateResourceNotFoundException : used to determine if ResourceNotFoundException should be
+ * handled by method and return Empty list or propagate the exception.
+ */
+ private ListShardsResponse listShards(ShardFilter shardFilter, final String nextToken,
+ final boolean shouldPropagateResourceNotFoundException) {
ListShardsRequest.Builder builder = KinesisRequestsBuilder.listShardsRequestBuilder();
if (StringUtils.isEmpty(nextToken)) {
- builder = builder.streamName(streamIdentifier.streamName()).shardFilter(shardFilter);
+ builder.streamName(streamIdentifier.streamName()).shardFilter(shardFilter);
+ streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString()));
} else {
- builder = builder.nextToken(nextToken);
+ builder.nextToken(nextToken);
}

final ListShardsRequest request = builder.build();
log.info("Stream {}: listing shards with list shards request {}", streamIdentifier, request);

@@ -211,7 +239,7 @@ public class KinesisShardDetector implements ShardDetector {
try {
result = getListShardsResponse(request);
} catch (ExecutionException e) {
- throw exceptionManager.apply(e.getCause());
+ throw AWS_EXCEPTION_MANAGER.apply(e.getCause());
} catch (InterruptedException e) {
// TODO: check if this is the correct behavior for Interrupted Exception
log.debug("Interrupted exception caught, shutdown initiated, returning null");

@@ -233,9 +261,14 @@ public class KinesisShardDetector implements ShardDetector {
} catch (ResourceNotFoundException e) {
log.warn("Got ResourceNotFoundException when fetching shard list for {}. Stream no longer exists.",
streamIdentifier.streamName());
- return ListShardsResponse.builder().shards(Collections.emptyList())
- .nextToken(null)
- .build();
+ if (shouldPropagateResourceNotFoundException) {
+ throw e;
+ }
+ return ListShardsResponse.builder()
+ .shards(Collections.emptyList())
+ .nextToken(null)
+ .build();
+
} catch (TimeoutException te) {
throw new RuntimeException(te);
}

@@ -275,11 +308,12 @@ public class KinesisShardDetector implements ShardDetector {

@Override
public List<ChildShard> getChildShards(final String shardId) throws InterruptedException, ExecutionException, TimeoutException {
- final GetShardIteratorRequest getShardIteratorRequest = KinesisRequestsBuilder.getShardIteratorRequestBuilder()
+ final GetShardIteratorRequest.Builder requestBuilder = KinesisRequestsBuilder.getShardIteratorRequestBuilder()
.streamName(streamIdentifier.streamName())
.shardIteratorType(ShardIteratorType.LATEST)
- .shardId(shardId)
- .build();
+ .shardId(shardId);
+ streamIdentifier.streamArnOptional().ifPresent(arn -> requestBuilder.streamARN(arn.toString()));
+ final GetShardIteratorRequest getShardIteratorRequest = requestBuilder.build();

final GetShardIteratorResponse getShardIteratorResponse =
FutureUtils.resolveOrCancelFuture(kinesisClient.getShardIterator(getShardIteratorRequest), kinesisRequestTimeout);
@@ -42,7 +42,7 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
@EqualsAndHashCode(exclude = {"concurrencyToken", "lastCounterIncrementNanos", "childShardIds", "pendingCheckpointState", "isMarkedForLeaseSteal"})
@ToString
public class Lease {
- /*
+ /**
* See javadoc for System.nanoTime - summary:
*
* Sometimes System.nanoTime's return values will wrap due to overflow. When they do, the difference between two

@@ -51,62 +51,57 @@ public class Lease {
private static final long MAX_ABS_AGE_NANOS = TimeUnit.DAYS.toNanos(365);

/**
- * @return leaseKey - identifies the unit of work associated with this lease.
+ * Identifies the unit of work associated with this lease.
*/
private String leaseKey;
/**
- * @return current owner of the lease, may be null.
+ * Current owner of the lease, may be null.
*/
private String leaseOwner;
/**
- * @return leaseCounter is incremented periodically by the holder of the lease. Used for optimistic locking.
+ * LeaseCounter is incremented periodically by the holder of the lease. Used for optimistic locking.
*/
private Long leaseCounter = 0L;

- /*
+ /**
* This field is used to prevent updates to leases that we have lost and re-acquired. It is deliberately not
* persisted in DynamoDB and excluded from hashCode and equals.
*/
private UUID concurrencyToken;

- /*
+ /**
* This field is used by LeaseRenewer and LeaseTaker to track the last time a lease counter was incremented. It is
* deliberately not persisted in DynamoDB and excluded from hashCode and equals.
*/
private Long lastCounterIncrementNanos;
/**
- * @return most recently application-supplied checkpoint value. During fail over, the new worker will pick up after
+ * Most recently application-supplied checkpoint value. During fail over, the new worker will pick up after
* the old worker's last checkpoint.
*/
private ExtendedSequenceNumber checkpoint;
/**
- * @return pending checkpoint, possibly null.
+ * Pending checkpoint, possibly null.
*/
private ExtendedSequenceNumber pendingCheckpoint;

/**
- * Last pending application state. Deliberately excluded from hashCode and equals.
- *
- * @return pending checkpoint state, possibly null.
+ * Last pending checkpoint state, possibly null. Deliberately excluded from hashCode and equals.
*/
private byte[] pendingCheckpointState;

/**
* Denotes whether the lease is marked for stealing. Deliberately excluded from hashCode and equals and
* not persisted in DynamoDB.
- *
- * @return flag for denoting lease is marked for stealing.
*/
@Setter
private boolean isMarkedForLeaseSteal;

/**
- * @return count of distinct lease holders between checkpoints.
+ * Count of distinct lease holders between checkpoints.
*/
private Long ownerSwitchesSinceCheckpoint = 0L;
- private Set<String> parentShardIds = new HashSet<>();
- private Set<String> childShardIds = new HashSet<>();
+ private final Set<String> parentShardIds = new HashSet<>();
+ private final Set<String> childShardIds = new HashSet<>();
private HashKeyRangeForLease hashKeyRangeForLease;

/**
@@ -117,8 +117,8 @@ public class LeaseCleanupManager {
public void enqueueForDeletion(LeasePendingDeletion leasePendingDeletion) {
final Lease lease = leasePendingDeletion.lease();
if (lease == null) {
- log.warn("Cannot enqueue lease {} for deferred deletion - instance doesn't hold the lease for that shard.",
- lease.leaseKey());
+ log.warn("Cannot enqueue {} for {} as instance doesn't hold the lease for that shard.",
+ leasePendingDeletion.shardInfo(), leasePendingDeletion.streamIdentifier());
} else {
log.debug("Enqueuing lease {} for deferred deletion.", lease.leaseKey());
if (!deletionQueue.add(leasePendingDeletion)) {

@@ -145,10 +145,18 @@ public class LeaseCleanupManager {
return deletionQueue.size();
}

+ /**
+ *
+ * @return true if the 'Completed Lease Stopwatch' has elapsed more time than the 'Completed Lease Cleanup Interval'
+ */
private boolean timeToCheckForCompletedShard() {
return completedLeaseStopwatch.elapsed(TimeUnit.MILLISECONDS) >= completedLeaseCleanupIntervalMillis;
}

+ /**
+ *
+ * @return true if the 'Garbage Lease Stopwatch' has elapsed more time than the 'Garbage Lease Cleanup Interval'
+ */
private boolean timeToCheckForGarbageShard() {
return garbageLeaseStopwatch.elapsed(TimeUnit.MILLISECONDS) >= garbageLeaseCleanupIntervalMillis;
}
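Reviewer note: both newly documented guards are the same elapsed-time gate over a Guava Stopwatch. A minimal sketch of that gate; the reset placement is an assumption about how the manager restarts its stopwatches after each sweep:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

// A minimal sketch of the stopwatch-based interval gate used for both cleanup checks.
class CleanupGate {
    private final Stopwatch stopwatch = Stopwatch.createStarted();
    private final long intervalMillis;

    CleanupGate(long intervalMillis) {
        this.intervalMillis = intervalMillis;
    }

    /** True at most once per interval; restarts the clock when it fires. */
    boolean shouldRun() {
        if (stopwatch.elapsed(TimeUnit.MILLISECONDS) >= intervalMillis) {
            stopwatch.reset().start();
            return true;
        }
        return false;
    }
}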
@@ -171,7 +179,7 @@ public class LeaseCleanupManager {
try {
if (cleanupLeasesUponShardCompletion && timeToCheckForCompletedShard) {
final Lease leaseFromDDB = leaseCoordinator.leaseRefresher().getLease(lease.leaseKey());
- if(leaseFromDDB != null) {
+ if (leaseFromDDB != null) {
Set<String> childShardKeys = leaseFromDDB.childShardIds();
if (CollectionUtils.isNullOrEmpty(childShardKeys)) {
try {

@@ -200,7 +208,8 @@ public class LeaseCleanupManager {
log.warn("Unable to cleanup lease for shard {} in {}", shardInfo.shardId(), streamIdentifier.streamName(), e);
}
} else {
- log.info("Lease not present in lease table while cleaning the shard {} of {}", shardInfo.shardId(), streamIdentifier.streamName());
+ log.info("Lease not present in lease table while cleaning the shard {} of {}",
+ shardInfo.shardId(), streamIdentifier.streamName());
cleanedUpCompletedLease = true;
}
}

@@ -224,13 +233,26 @@ public class LeaseCleanupManager {

// A lease that ended with SHARD_END from ResourceNotFoundException is safe to delete if it no longer exists in the
// stream (known explicitly from ResourceNotFound being thrown when processing this shard),
- private boolean cleanupLeaseForGarbageShard(Lease lease, Throwable e) throws DependencyException, ProvisionedThroughputException, InvalidStateException {
+ private boolean cleanupLeaseForGarbageShard(Lease lease, Throwable e)
+ throws DependencyException, ProvisionedThroughputException, InvalidStateException {
log.warn("Deleting lease {} as it is not present in the stream.", lease, e);
leaseCoordinator.leaseRefresher().deleteLease(lease);
return true;
}

- private boolean allParentShardLeasesDeleted(Lease lease, ShardInfo shardInfo) throws DependencyException, ProvisionedThroughputException, InvalidStateException {
+ /**
+ * Check if the all of the parent shards for a given lease have an ongoing lease. If any one parent still has a
+ * lease, return false. Otherwise return true
+ *
+ * @param lease
+ * @param shardInfo
+ * @return
+ * @throws DependencyException
+ * @throws ProvisionedThroughputException
+ * @throws InvalidStateException
+ */
+ private boolean allParentShardLeasesDeleted(Lease lease, ShardInfo shardInfo)
+ throws DependencyException, ProvisionedThroughputException, InvalidStateException {
for (String parentShard : lease.parentShardIds()) {
final Lease parentLease = leaseCoordinator.leaseRefresher().getLease(ShardInfo.getLeaseKey(shardInfo, parentShard));

@@ -18,6 +18,7 @@ package software.amazon.kinesis.leases;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.time.Duration;
+ import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;

@@ -28,8 +29,11 @@ import lombok.Data;
import lombok.NonNull;
import lombok.experimental.Accessors;
import org.apache.commons.lang3.Validate;
+
+ import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
+ import software.amazon.awssdk.services.dynamodb.model.Tag;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;

@@ -64,22 +68,16 @@ public class LeaseManagementConfig {

/**
* Name of the table to use in DynamoDB
- *
- * @return String
*/
@NonNull
private final String tableName;
/**
* Client to be used to access DynamoDB service.
- *
- * @return {@link DynamoDbAsyncClient}
*/
@NonNull
private final DynamoDbAsyncClient dynamoDBClient;
/**
* Client to be used to access Kinesis Data Streams service.
- *
- * @return {@link KinesisAsyncClient}
*/
@NonNull
private final KinesisAsyncClient kinesisClient;

@@ -90,8 +88,6 @@ public class LeaseManagementConfig {
private String streamName;
/**
* Used to distinguish different workers/processes of a KCL application.
- *
- * @return String
*/
@NonNull
private final String workerIdentifier;

@@ -198,6 +194,13 @@ public class LeaseManagementConfig {

private BillingMode billingMode = BillingMode.PAY_PER_REQUEST;

+ /**
+ * The list of tags to be applied to the DynamoDB table created for lease management.
+ *
+ * <p>Default value: {@link DefaultSdkAutoConstructList}
+ */
+ private Collection<Tag> tags = DefaultSdkAutoConstructList.getInstance();
+
/**
* Frequency (in millis) of the auditor job to scan for partial leases in the lease table.
* If the auditor detects any hole in the leases for a stream, then it would trigger shard sync based on
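Reviewer note: with the new field, applications can ask KCL to tag the lease table it creates. A hedged usage sketch - LeaseManagementConfig uses Lombok fluent accessors, so tags(...) is the generated setter, and the config instance is assumed to come from the usual ConfigsBuilder wiring:

import java.util.Arrays;
import java.util.Collection;

import software.amazon.awssdk.services.dynamodb.model.Tag;
import software.amazon.kinesis.leases.LeaseManagementConfig;

class TaggedLeaseTableConfig {
    // Illustrative: leaseManagementConfig would normally come from ConfigsBuilder#leaseManagementConfig().
    static LeaseManagementConfig withTags(LeaseManagementConfig leaseManagementConfig) {
        Collection<Tag> tags = Arrays.asList(
                Tag.builder().key("application").value("my-kcl-app").build(),
                Tag.builder().key("stage").value("prod").build());
        // Tags only take effect when KCL itself creates the lease table.
        return leaseManagementConfig.tags(tags);
    }
}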
@@ -319,7 +322,7 @@ public class LeaseManagementConfig {
private LeaseManagementFactory leaseManagementFactory;

public HierarchicalShardSyncer hierarchicalShardSyncer() {
- if(hierarchicalShardSyncer == null) {
+ if (hierarchicalShardSyncer == null) {
hierarchicalShardSyncer = new HierarchicalShardSyncer();
}
return hierarchicalShardSyncer;

@@ -354,7 +357,7 @@ public class LeaseManagementConfig {
initialLeaseTableReadCapacity(),
initialLeaseTableWriteCapacity(),
hierarchicalShardSyncer(),
- tableCreatorCallback(), dynamoDbRequestTimeout(), billingMode());
+ tableCreatorCallback(), dynamoDbRequestTimeout(), billingMode(), tags());
}
return leaseManagementFactory;
}

@@ -366,7 +369,7 @@ public class LeaseManagementConfig {
* @return LeaseManagementFactory
*/
public LeaseManagementFactory leaseManagementFactory(final LeaseSerializer leaseSerializer, boolean isMultiStreamingMode) {
- if(leaseManagementFactory == null) {
+ if (leaseManagementFactory == null) {
leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(),
dynamoDBClient(),
tableName(),

@@ -393,6 +396,7 @@ public class LeaseManagementConfig {
tableCreatorCallback(),
dynamoDbRequestTimeout(),
billingMode(),
+ tags(),
leaseSerializer,
customShardDetectorProvider(),
isMultiStreamingMode,

@@ -16,6 +16,7 @@
package software.amazon.kinesis.leases;

import software.amazon.kinesis.common.StreamConfig;
+ import software.amazon.kinesis.coordinator.DeletedStreamListProvider;
import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher;
import software.amazon.kinesis.metrics.MetricsFactory;

@@ -31,6 +32,11 @@ public interface LeaseManagementFactory {
throw new UnsupportedOperationException();
}

+ default ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, StreamConfig streamConfig,
+ DeletedStreamListProvider deletedStreamListProvider) {
+ throw new UnsupportedOperationException("createShardSyncTaskManager method not implemented");
+ }
+
DynamoDBLeaseRefresher createLeaseRefresher();

ShardDetector createShardDetector();

@@ -210,8 +210,6 @@ public interface LeaseRefresher {
* Update application-specific fields of the given lease in DynamoDB. Does not update fields managed by the leasing
* library such as leaseCounter, leaseOwner, or leaseKey.
- *
- * @return true if update succeeded, false otherwise
*
* @throws InvalidStateException if lease table does not exist
* @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity
* @throws DependencyException if DynamoDB update fails in an unexpected way

@@ -23,8 +23,6 @@ import lombok.Setter;
import lombok.experimental.Accessors;
import org.apache.commons.lang3.Validate;

- import java.util.Objects;
-
import static com.google.common.base.Verify.verifyNotNull;

@Setter

@@ -46,6 +46,16 @@ public interface ShardDetector {
*/
List<Shard> listShards();

+ /**
+ * This method behaves exactly similar to listShards except the fact that this does not consume and throw
+ * ResourceNotFoundException instead of returning empty list.
+ *
+ * @return Shards
+ */
+ default List<Shard> listShardsWithoutConsumingResourceNotFoundException() {
+ throw new UnsupportedOperationException("listShardsWithoutConsumingResourceNotFoundException not implemented");
+ }
+
/**
* List shards with shard filter.
*
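Reviewer note: the contract split is the point of the new default method - listShards swallows ResourceNotFoundException and returns an empty list, while the new variant lets it propagate so callers can tell "stream deleted" apart from "no shards". A hedged caller-side sketch (any implementation that overrides the default, such as KinesisShardDetector, is assumed):

import java.util.Collections;
import java.util.List;

import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.services.kinesis.model.Shard;
import software.amazon.kinesis.leases.ShardDetector;

// Hedged caller-side sketch: distinguish "stream deleted" from "no shards".
class ShardListingContract {
    static List<Shard> shardsOrEmpty(ShardDetector detector, Runnable onStreamDeleted) {
        try {
            // Propagating variant: throws if the stream no longer exists.
            return detector.listShardsWithoutConsumingResourceNotFoundException();
        } catch (ResourceNotFoundException e) {
            // With plain listShards() this case would be indistinguishable
            // from an empty-but-existing stream.
            onStreamDeleted.run();
            return Collections.emptyList();
        }
    }
}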
@@ -37,7 +37,7 @@ import software.amazon.kinesis.metrics.MetricsUtil;
@Slf4j
@KinesisClientInternalApi
public class ShardSyncTask implements ConsumerTask {
- private final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask";
+ private static final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask";

@NonNull
private final ShardDetector shardDetector;

@@ -205,7 +205,8 @@ public class ShardSyncTaskManager {

private void handlePendingShardSyncs(Throwable exception, TaskResult taskResult) {
if (exception != null || taskResult.getException() != null) {
- log.error("Caught exception running {} task: ", currentTask.taskType(), exception != null ? exception : taskResult.getException());
+ log.error("Caught exception running {} task: {}", currentTask.taskType(),
+ exception != null ? exception : taskResult.getException());
}
// Acquire lock here. If shardSyncRequestPending is false in this completionStage and
// submitShardSyncTask is invoked, before completion stage exits (future completes)

@@ -332,8 +332,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator {

@Override
public void stopLeaseTaker() {
- takerFuture.cancel(false);
-
+ if (takerFuture != null) {
+ takerFuture.cancel(false);
+ }
}

@Override
@@ -16,19 +16,23 @@
package software.amazon.kinesis.leases.dynamodb;

import java.time.Duration;
+ import java.util.Collection;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;
import lombok.Data;
import lombok.NonNull;
+ import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
+ import software.amazon.awssdk.services.dynamodb.model.Tag;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.LeaseCleanupConfig;
import software.amazon.kinesis.common.StreamConfig;
import software.amazon.kinesis.common.StreamIdentifier;
+ import software.amazon.kinesis.coordinator.DeletedStreamListProvider;
import software.amazon.kinesis.leases.HierarchicalShardSyncer;
import software.amazon.kinesis.leases.KinesisShardDetector;
import software.amazon.kinesis.leases.LeaseCleanupManager;

@@ -86,6 +90,7 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
private final TableCreatorCallback tableCreatorCallback;
private final Duration dynamoDbRequestTimeout;
private final BillingMode billingMode;
+ private final Collection<Tag> tags;
private final boolean isMultiStreamMode;
private final LeaseCleanupConfig leaseCleanupConfig;

@@ -343,6 +348,61 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, new DynamoDBLeaseSerializer());
}

+ /**
+ * Constructor.
+ *
+ * @param kinesisClient
+ * @param streamName
+ * @param dynamoDBClient
+ * @param tableName
+ * @param workerIdentifier
+ * @param executorService
+ * @param initialPositionInStream
+ * @param failoverTimeMillis
+ * @param epsilonMillis
+ * @param maxLeasesForWorker
+ * @param maxLeasesToStealAtOneTime
+ * @param maxLeaseRenewalThreads
+ * @param cleanupLeasesUponShardCompletion
+ * @param ignoreUnexpectedChildShards
+ * @param shardSyncIntervalMillis
+ * @param consistentReads
+ * @param listShardsBackoffTimeMillis
+ * @param maxListShardsRetryAttempts
+ * @param maxCacheMissesBeforeReload
+ * @param listShardsCacheAllowedAgeInSeconds
+ * @param cacheMissWarningModulus
+ * @param initialLeaseTableReadCapacity
+ * @param initialLeaseTableWriteCapacity
+ * @param hierarchicalShardSyncer
+ * @param tableCreatorCallback
+ * @param dynamoDbRequestTimeout
+ * @param billingMode
+ * @param tags
+ */
+ @Deprecated
+ public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName,
+ final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier,
+ final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream,
+ final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker,
+ final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads,
+ final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards,
+ final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis,
+ final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload,
+ final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus,
+ final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity,
+ final HierarchicalShardSyncer hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback,
+ Duration dynamoDbRequestTimeout, BillingMode billingMode, Collection<Tag> tags) {
+
+ this(kinesisClient, new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), dynamoDBClient, tableName,
+ workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker,
+ maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion,
+ ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis,
+ maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds,
+ cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity,
+ hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, new DynamoDBLeaseSerializer());
+ }
+
/**
* Constructor.
*
@@ -373,6 +433,7 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
* @param dynamoDbRequestTimeout
* @param billingMode
*/
+ @Deprecated
private DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final StreamConfig streamConfig,
final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier,
final ExecutorService executorService, final long failoverTimeMillis, final long epsilonMillis, final long leaseTakerIntervalMillis,

@@ -384,13 +445,65 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity,
final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback,
Duration dynamoDbRequestTimeout, BillingMode billingMode, LeaseSerializer leaseSerializer) {
+ this(kinesisClient, streamConfig, dynamoDBClient, tableName,
+ workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker,
+ maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion,
+ ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis,
+ maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds,
+ cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity,
+ deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode,
+ DefaultSdkAutoConstructList.getInstance(), leaseSerializer);
+ }
+
+ /**
+ * Constructor.
+ *
+ * @param kinesisClient
+ * @param streamConfig
+ * @param dynamoDBClient
+ * @param tableName
+ * @param workerIdentifier
+ * @param executorService
+ * @param failoverTimeMillis
+ * @param epsilonMillis
+ * @param maxLeasesForWorker
+ * @param maxLeasesToStealAtOneTime
+ * @param maxLeaseRenewalThreads
+ * @param cleanupLeasesUponShardCompletion
+ * @param ignoreUnexpectedChildShards
+ * @param shardSyncIntervalMillis
+ * @param consistentReads
+ * @param listShardsBackoffTimeMillis
+ * @param maxListShardsRetryAttempts
+ * @param maxCacheMissesBeforeReload
+ * @param listShardsCacheAllowedAgeInSeconds
+ * @param cacheMissWarningModulus
+ * @param initialLeaseTableReadCapacity
+ * @param initialLeaseTableWriteCapacity
+ * @param deprecatedHierarchicalShardSyncer
+ * @param tableCreatorCallback
+ * @param dynamoDbRequestTimeout
+ * @param billingMode
+ * @param tags
+ */
+ private DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final StreamConfig streamConfig,
+ final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier,
+ final ExecutorService executorService, final long failoverTimeMillis, final long epsilonMillis, final long leaseTakerIntervalMillis,
+ final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads,
+ final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards,
+ final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis,
+ final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload,
+ final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus,
+ final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity,
+ final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback,
+ Duration dynamoDbRequestTimeout, BillingMode billingMode, Collection<Tag> tags, LeaseSerializer leaseSerializer) {
this(kinesisClient, dynamoDBClient, tableName,
workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, leaseTakerIntervalMillis, maxLeasesForWorker,
maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion,
ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis,
maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds,
cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity,
- deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, leaseSerializer,
+ deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, tags, leaseSerializer,
null, false, LeaseManagementConfig.DEFAULT_LEASE_CLEANUP_CONFIG);
this.streamConfig = streamConfig;
}

@@ -437,7 +550,7 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus,
final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity,
final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback,
- Duration dynamoDbRequestTimeout, BillingMode billingMode, LeaseSerializer leaseSerializer,
+ Duration dynamoDbRequestTimeout, BillingMode billingMode, Collection<Tag> tags, LeaseSerializer leaseSerializer,
Function<StreamConfig, ShardDetector> customShardDetectorProvider, boolean isMultiStreamMode,
LeaseCleanupConfig leaseCleanupConfig) {
this.kinesisClient = kinesisClient;

@@ -470,6 +583,7 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
this.customShardDetectorProvider = customShardDetectorProvider;
this.isMultiStreamMode = isMultiStreamMode;
this.leaseCleanupConfig = leaseCleanupConfig;
+ this.tags = tags;
}

@Override

@@ -507,6 +621,20 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
*/
@Override
public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, StreamConfig streamConfig) {
+ return createShardSyncTaskManager(metricsFactory, streamConfig, null);
+ }
+
+ /**
+ * Create ShardSyncTaskManager from the streamConfig passed
+ *
+ * @param metricsFactory - factory to get metrics object
+ * @param streamConfig - streamConfig for which ShardSyncTaskManager needs to be created
+ * @param deletedStreamListProvider - store for capturing the streams which are deleted in kinesis
+ * @return ShardSyncTaskManager
+ */
+ @Override
+ public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, StreamConfig streamConfig,
+ DeletedStreamListProvider deletedStreamListProvider) {
return new ShardSyncTaskManager(this.createShardDetector(streamConfig),
this.createLeaseRefresher(),
streamConfig.initialPositionInStreamExtended(),

@@ -514,14 +642,16 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory {
ignoreUnexpectedChildShards,
shardSyncIntervalMillis,
executorService,
- new HierarchicalShardSyncer(isMultiStreamMode, streamConfig.streamIdentifier().toString()),
+ new HierarchicalShardSyncer(isMultiStreamMode, streamConfig.streamIdentifier().toString(),
+ deletedStreamListProvider),
metricsFactory);
}

+
@Override
public DynamoDBLeaseRefresher createLeaseRefresher() {
return new DynamoDBLeaseRefresher(tableName, dynamoDBClient, leaseSerializer, consistentReads,
- tableCreatorCallback, dynamoDbRequestTimeout, billingMode);
+ tableCreatorCallback, dynamoDbRequestTimeout, billingMode, tags);
}

@Override
@@ -18,6 +18,7 @@ import com.google.common.collect.ImmutableMap;

import java.time.Duration;
import java.util.ArrayList;
+ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;

@@ -25,6 +26,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
+ import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate;

@@ -46,6 +48,7 @@ import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;
import software.amazon.awssdk.services.dynamodb.model.TableStatus;
+ import software.amazon.awssdk.services.dynamodb.model.Tag;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest;
import software.amazon.awssdk.utils.CollectionUtils;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

@@ -77,6 +80,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {

private final Duration dynamoDbRequestTimeout;
private final BillingMode billingMode;
+ private final Collection<Tag> tags;

private boolean newTableCreated = false;

@@ -143,10 +147,30 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
* @param dynamoDbRequestTimeout
* @param billingMode
*/
+ @Deprecated
public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient,
final LeaseSerializer serializer, final boolean consistentReads,
@NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout,
final BillingMode billingMode) {
+ this(table, dynamoDBClient, serializer, consistentReads, tableCreatorCallback, dynamoDbRequestTimeout,
+ billingMode, DefaultSdkAutoConstructList.getInstance());
+ }
+
+ /**
+ * Constructor.
+ * @param table
+ * @param dynamoDBClient
+ * @param serializer
+ * @param consistentReads
+ * @param tableCreatorCallback
+ * @param dynamoDbRequestTimeout
+ * @param billingMode
+ * @param tags
+ */
+ public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient,
+ final LeaseSerializer serializer, final boolean consistentReads,
+ @NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout,
+ final BillingMode billingMode, final Collection<Tag> tags) {
this.table = table;
this.dynamoDBClient = dynamoDBClient;
this.serializer = serializer;

@@ -154,6 +178,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
this.tableCreatorCallback = tableCreatorCallback;
this.dynamoDbRequestTimeout = dynamoDbRequestTimeout;
this.billingMode = billingMode;
+ this.tags = tags;
}

/**

@@ -162,20 +187,13 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
@Override
public boolean createLeaseTableIfNotExists(@NonNull final Long readCapacity, @NonNull final Long writeCapacity)
throws ProvisionedThroughputException, DependencyException {
- ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(readCapacity)
+ final CreateTableRequest.Builder builder = createTableRequestBuilder();
+ if (BillingMode.PROVISIONED.equals(billingMode)) {
+ ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(readCapacity)
.writeCapacityUnits(writeCapacity).build();
- final CreateTableRequest request;
- if(BillingMode.PAY_PER_REQUEST.equals(billingMode)){
- request = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema())
- .attributeDefinitions(serializer.getAttributeDefinitions())
- .billingMode(billingMode).build();
- }else{
- request = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema())
- .attributeDefinitions(serializer.getAttributeDefinitions()).provisionedThroughput(throughput)
- .build();
+ builder.provisionedThroughput(throughput);
}

- return createTableIfNotExists(request);
+ return createTableIfNotExists(builder.build());
}

/**

@@ -184,9 +202,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
@Override
public boolean createLeaseTableIfNotExists()
throws ProvisionedThroughputException, DependencyException {
- final CreateTableRequest request = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema())
- .attributeDefinitions(serializer.getAttributeDefinitions())
- .billingMode(billingMode).build();
+ final CreateTableRequest request = createTableRequestBuilder().build();

return createTableIfNotExists(request);
}
@@ -429,7 +445,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
@Override
public boolean createLeaseIfNotExists(@NonNull final Lease lease)
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
- log.debug("Creating lease {}", lease);
+ log.debug("Creating lease: {}", lease);

PutItemRequest request = PutItemRequest.builder().tableName(table).item(serializer.toDynamoRecord(lease))
.expected(serializer.getDynamoNonexistantExpectation()).build();

@@ -452,6 +468,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
} catch (DynamoDbException | TimeoutException e) {
throw convertAndRethrowExceptions("create", lease.leaseKey(), e);
}
+ log.info("Created lease: {}", lease);
return true;
}

@@ -476,7 +493,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
return null;
} else {
final Lease lease = serializer.fromDynamoRecord(dynamoRecord);
- log.debug("Got lease {}", lease);
+ log.debug("Retrieved lease: {}", lease);
return lease;
}
} catch (ExecutionException e) {

@@ -535,6 +552,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
}

lease.leaseCounter(lease.leaseCounter() + 1);
+ log.debug("Renewed lease with key {}", lease.leaseKey());
return true;
}

@@ -582,6 +600,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
lease.ownerSwitchesSinceCheckpoint(lease.ownerSwitchesSinceCheckpoint() + 1);
}

+ log.info("Transferred lease {} ownership from {} to {}", lease.leaseKey(), oldOwner, owner);
+
return true;
}

@@ -620,6 +640,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {

lease.leaseOwner(null);
lease.leaseCounter(lease.leaseCounter() + 1);
+
+ log.info("Evicted lease with leaseKey {}", lease.leaseKey());
return true;
}

@@ -648,6 +670,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
} catch (DynamoDbException | TimeoutException e) {
throw convertAndRethrowExceptions("deleteAll", lease.leaseKey(), e);
}
+ log.debug("Deleted lease {} from table {}", lease.leaseKey(), table);
}
}

@@ -675,6 +698,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
} catch (DynamoDbException | TimeoutException e) {
throw convertAndRethrowExceptions("delete", lease.leaseKey(), e);
}
+
+ log.info("Deleted lease with leaseKey {}", lease.leaseKey());
}

/**

@@ -683,7 +708,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
@Override
public boolean updateLease(@NonNull final Lease lease)
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
- log.debug("Updating lease {}", lease);
+ log.debug("Updating lease: {}", lease);

final AWSExceptionManager exceptionManager = createExceptionManager();
exceptionManager.add(ConditionalCheckFailedException.class, t -> t);

@@ -711,6 +736,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
}

lease.leaseCounter(lease.leaseCounter() + 1);
+ log.info("Updated lease {}.", lease.leaseKey());
return true;
}

@@ -738,6 +764,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
} catch (DynamoDbException | TimeoutException e) {
throw convertAndRethrowExceptions("update", lease.leaseKey(), e);
}
+
+ log.info("Updated lease without expectation {}.", lease);
}

/**

@@ -775,6 +803,16 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher {
}
}

+ private CreateTableRequest.Builder createTableRequestBuilder() {
+ final CreateTableRequest.Builder builder = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema())
+ .attributeDefinitions(serializer.getAttributeDefinitions())
+ .tags(tags);
+ if (BillingMode.PAY_PER_REQUEST.equals(billingMode)) {
+ builder.billingMode(billingMode);
+ }
+ return builder;
|
||||
}
|
||||
|
||||
private AWSExceptionManager createExceptionManager() {
|
||||
final AWSExceptionManager exceptionManager = new AWSExceptionManager();
|
||||
exceptionManager.add(DynamoDbException.class, t -> t);
|
||||
|
|
|
|||
|
|
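Aside: a minimal sketch of the billing-mode-aware request construction that createTableRequestBuilder() above centralizes. Class names come from the AWS SDK v2 DynamoDB model; the table name and capacities are hypothetical.

    // Sketch: set provisionedThroughput only for PROVISIONED tables;
    // PAY_PER_REQUEST tables carry the billing mode and omit throughput.
    CreateTableRequest.Builder builder = CreateTableRequest.builder()
            .tableName("MyLeaseTable")
            .keySchema(KeySchemaElement.builder()
                    .attributeName("leaseKey").keyType(KeyType.HASH).build())
            .attributeDefinitions(AttributeDefinition.builder()
                    .attributeName("leaseKey").attributeType(ScalarAttributeType.S).build());
    if (BillingMode.PAY_PER_REQUEST.equals(billingMode)) {
        builder.billingMode(billingMode);
    } else {
        builder.provisionedThroughput(ProvisionedThroughput.builder()
                .readCapacityUnits(10L).writeCapacityUnits(10L).build());
    }
    CreateTableRequest request = builder.build();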
@@ -242,7 +242,7 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer {

/**
* Internal method to return a lease with a specific lease key only if we currently hold it.
*
*
* @param leaseKey key of lease to return
* @param now current timestamp for old-ness checking
* @return non-authoritative copy of the held lease, or null if we don't currently hold it

@@ -309,6 +309,7 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer {
long startTime = System.currentTimeMillis();
boolean success = false;
try {
log.info("Updating lease from {} to {}", authoritativeLease, lease);
synchronized (authoritativeLease) {
authoritativeLease.update(lease);
boolean updatedLease = leaseRefresher.updateLease(authoritativeLease);

@@ -325,7 +326,7 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer {
/*
* Remove only if the value currently in the map is the same as the authoritative lease. We're
* guarding against a pause after the concurrency token check above. It plays out like so:
*
*
* 1) Concurrency token check passes
* 2) Pause. Lose lease, re-acquire lease. This requires at least one lease counter update.
* 3) Unpause. leaseRefresher.updateLease fails conditional write due to counter updates, returns

@@ -333,7 +334,7 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer {
* 4) ownedLeases.remove(key, value) doesn't do anything because authoritativeLease does not
* .equals() the re-acquired version in the map on the basis of lease counter. This is what we want.
* If we just used ownedLease.remove(key), we would have pro-actively removed a lease incorrectly.
*
*
* Note that there is a subtlety here - Lease.equals() deliberately does not check the concurrency
* token, but it does check the lease counter, so this scheme works.
*/
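Aside: the comment above leans on ConcurrentMap's two-argument remove. A self-contained sketch of that guard (the map and lease values are hypothetical):

    // remove(key, value) only removes the entry if it still maps to the given
    // value. A lease that was lost and re-acquired has a bumped lease counter,
    // fails Lease.equals(), and therefore survives this call.
    void evictIfUnchanged(java.util.concurrent.ConcurrentMap<String, Lease> ownedLeases,
            String leaseKey, Lease staleAuthoritativeCopy) {
        ownedLeases.remove(leaseKey, staleAuthoritativeCopy);
    }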
@@ -89,7 +89,7 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer {
result.put(PENDING_CHECKPOINT_STATE_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()));
}

if(lease.hashKeyRangeForLease() != null) {
if (lease.hashKeyRangeForLease() != null) {
result.put(STARTING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey()));
result.put(ENDING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey()));
}

@@ -224,11 +224,13 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer {
public Map<String, AttributeValueUpdate> getDynamoTakeLeaseUpdate(final Lease lease, String owner) {
Map<String, AttributeValueUpdate> result = new HashMap<>();

result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(owner)).action(AttributeAction.PUT).build());
result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(owner))
.action(AttributeAction.PUT).build());

String oldOwner = lease.leaseOwner();
if (oldOwner != null && !oldOwner.equals(owner)) {
result.put(OWNER_SWITCHES_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(1L)).action(AttributeAction.ADD).build());
result.put(OWNER_SWITCHES_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(1L))
.action(AttributeAction.ADD).build());
}

return result;

@@ -257,7 +259,8 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer {

if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) {
result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber())));
result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber())));
result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(
lease.pendingCheckpoint().subSequenceNumber())));
} else {
result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build());
result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build());

@@ -274,7 +277,7 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer {
result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds())));
}

if(lease.hashKeyRangeForLease() != null) {
if (lease.hashKeyRangeForLease() != null) {
result.put(STARTING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey())));
result.put(ENDING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey())));
}
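Aside: a sketch of the PUT-action wrapper that the putUpdate(...) calls above suggest, using the AWS SDK v2 DynamoDB model (the helper itself is private to the serializer and assumed here):

    // Wrap an AttributeValue in an update that overwrites the stored attribute.
    static AttributeValueUpdate putUpdate(AttributeValue value) {
        return AttributeValueUpdate.builder()
                .value(value)
                .action(AttributeAction.PUT)
                .build();
    }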
@@ -73,7 +73,6 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
private long veryOldLeaseDurationNanosMultiplier = 3;
private long lastScanTimeNanos = 0L;


public DynamoDBLeaseTaker(LeaseRefresher leaseRefresher, String workerIdentifier, long leaseDurationMillis,
final MetricsFactory metricsFactory) {
this.leaseRefresher = leaseRefresher;

@@ -184,7 +183,6 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
MetricsUtil.addSuccessAndLatency(scope, "ListLeases", success, startTime, MetricsLevel.DETAILED);
}


if (lastException != null) {
log.error("Worker {} could not scan leases table, aborting TAKE_LEASES_DIMENSION. Exception caught by"
+ " last retry:", workerIdentifier, lastException);

@@ -259,6 +257,7 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
leasesToTake = leasesToTake.stream().map(lease -> {
if (lease.isMarkedForLeaseSteal()) {
try {
log.debug("Updating stale lease {}.", lease.leaseKey());
return leaseRefresher.getLease(lease.leaseKey());
} catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
log.warn("Failed to fetch latest state of the lease {} that needs to be stolen, "

@@ -318,8 +317,7 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
for (Lease lease : freshList) {
String leaseKey = lease.leaseKey();

Lease oldLease = allLeases.get(leaseKey);
allLeases.put(leaseKey, lease);
final Lease oldLease = allLeases.put(leaseKey, lease);
notUpdated.remove(leaseKey);

if (oldLease != null) {

@@ -392,7 +390,6 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
Set<Lease> leasesToTake = new HashSet<>();
final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, TAKE_LEASES_DIMENSION);
MetricsUtil.addWorkerIdentifier(scope, workerIdentifier);
List<Lease> veryOldLeases = new ArrayList<>();

final int numAvailableLeases = availableLeases.size();
int numLeases = 0;

@@ -410,14 +407,13 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
return leasesToTake;
}


int target;
if (numWorkers >= numLeases) {
// If we have n leases and n or more workers, each worker can have up to 1 lease, including myself.
target = 1;
} else {
/*
* numWorkers must be < numLeases.
* if we have made it here, it means there are more leases than workers
*
* Our target for each worker is numLeases / numWorkers (+1 if numWorkers doesn't evenly divide numLeases)
*/

@@ -443,9 +439,9 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
// If there are leases that have been expired for an extended period of
// time, take them with priority, disregarding the target (computed
// later) but obeying the maximum limit per worker.
veryOldLeases = allLeases.values().stream()
.filter(lease -> System.nanoTime() - lease.lastCounterIncrementNanos()
> veryOldLeaseDurationNanosMultiplier * leaseDurationNanos)
final long nanoThreshold = System.nanoTime() - (veryOldLeaseDurationNanosMultiplier * leaseDurationNanos);
final List<Lease> veryOldLeases = allLeases.values().stream()
.filter(lease -> nanoThreshold > lease.lastCounterIncrementNanos())
.collect(Collectors.toList());

if (!veryOldLeases.isEmpty()) {

@@ -489,7 +485,6 @@ public class DynamoDBLeaseTaker implements LeaseTaker {
workerIdentifier, numLeases, numAvailableLeases, numWorkers, target, myCount,
leasesToTake.size());
}

} finally {
scope.addData("AvailableLeases", availableLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
scope.addData("LeaseSpillover", leaseSpillover, StandardUnit.COUNT, MetricsLevel.SUMMARY);
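Aside: the veryOldLeases rewrite above is a pure refactor; a short sketch of why the hoisted threshold is equivalent:

    // now - last > k * d  rearranges to  (now - k * d) > last, so the cutoff
    // can be computed once per scan instead of reading the clock per lease.
    final long nanoThreshold = System.nanoTime()
            - (veryOldLeaseDurationNanosMultiplier * leaseDurationNanos);
    boolean isVeryOld = nanoThreshold > lease.lastCounterIncrementNanos();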
@@ -21,7 +21,7 @@ package software.amazon.kinesis.leases.dynamodb;
@FunctionalInterface
public interface TableCreatorCallback {
/**
* NoOp implemetation for TableCreatorCallback
* NoOp implementation for TableCreatorCallback
*/
TableCreatorCallback NOOP_TABLE_CREATOR_CALLBACK = (TableCreatorCallbackInput tableCreatorCallbackInput) -> {
// Do nothing
@@ -19,9 +19,9 @@ package software.amazon.kinesis.leases.exceptions;
*/
public class CustomerApplicationException extends Exception {

public CustomerApplicationException(Throwable e) { super(e);}
public CustomerApplicationException(Throwable e) { super(e); }

public CustomerApplicationException(String message, Throwable e) { super(message, e);}
public CustomerApplicationException(String message, Throwable e) { super(message, e); }

public CustomerApplicationException(String message) { super(message);}
public CustomerApplicationException(String message) { super(message); }
}
@@ -12,14 +12,13 @@ import software.amazon.kinesis.metrics.MetricsScope;
* Helper class to sync leases with shards of the Kinesis stream.
* It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding).
* It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it
* and begun processing it's child shards.
* and begun processing its child shards.
*
* <p>NOTE: This class is deprecated and will be removed in a future release.</p>
*/
@Deprecated
public class ShardSyncer {
private static final HierarchicalShardSyncer HIERARCHICAL_SHARD_SYNCER = new HierarchicalShardSyncer();
private static final boolean garbageCollectLeases = true;

/**
* <p>NOTE: This method is deprecated and will be removed in a future release.</p>
@@ -93,24 +93,11 @@ class ConsumerStates {
}
}


/**
* The initial state that any {@link ShardConsumer} should start in.
*/
static final ConsumerState INITIAL_STATE = ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState();

private static ConsumerState shutdownStateFor(ShutdownReason reason) {
switch (reason) {
case REQUESTED:
return ShardConsumerState.SHUTDOWN_REQUESTED.consumerState();
case SHARD_END:
case LEASE_LOST:
return ShardConsumerState.SHUTTING_DOWN.consumerState();
default:
throw new IllegalArgumentException("Unknown reason: " + reason);
}
}

/**
* This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all parent
* shards have been completed.
@@ -21,7 +21,6 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.checkpoint.Checkpoint;
import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.leases.MultiStreamLease;
import software.amazon.kinesis.leases.ShardInfo;
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.metrics.MetricsFactory;
@@ -113,7 +113,7 @@ public class ProcessTask implements ConsumerTask {
*/
@Override
public TaskResult call() {
/**
/*
* NOTE: the difference between appScope and shardScope is, appScope doesn't have shardId as a dimension,
* therefore all data added to appScope, although from different shard consumer, will be sent to the same metric,
* which is the app-level MillsBehindLatest metric.

@@ -180,8 +180,6 @@ public class ProcessTask implements ConsumerTask {
}
}




private List<KinesisClientRecord> deaggregateAnyKplRecords(List<KinesisClientRecord> records) {
if (shard == null) {
return aggregatorUtil.deaggregate(records);

@@ -214,8 +212,10 @@ public class ProcessTask implements ConsumerTask {
log.debug("Calling application processRecords() with {} records from {}", records.size(),
shardInfoId);

final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records).cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime())
.checkpointer(recordProcessorCheckpointer).millisBehindLatest(input.millisBehindLatest()).build();
final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records)
.cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime())
.isAtShardEnd(input.isAtShardEnd()).checkpointer(recordProcessorCheckpointer)
.millisBehindLatest(input.millisBehindLatest()).build();

final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION);
shardInfo.streamIdentifierSerOpt()

@@ -245,28 +245,6 @@ public class ProcessTask implements ConsumerTask {
return (!records.isEmpty()) || shouldCallProcessRecordsEvenForEmptyRecordList;
}

/**
* Emits metrics, and sleeps if there are no records available
*
* @param startTimeMillis
* the time when the task started
*/
private void handleNoRecords(long startTimeMillis) {
log.debug("Kinesis didn't return any records for shard {}", shardInfoId);

long sleepTimeMillis = idleTimeInMilliseconds - (System.currentTimeMillis() - startTimeMillis);
if (sleepTimeMillis > 0) {
sleepTimeMillis = Math.max(sleepTimeMillis, idleTimeInMilliseconds);
try {
log.debug("Sleeping for {} ms since there were no new records in shard {}", sleepTimeMillis,
shardInfoId);
Thread.sleep(sleepTimeMillis);
} catch (InterruptedException e) {
log.debug("ShardId {}: Sleep was interrupted", shardInfoId);
}
}
}

@Override
public TaskType taskType() {
return taskType;
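Aside: the rebuilt ProcessRecordsInput above now carries isAtShardEnd through to the application. A hedged sketch of how a KCL 2.x record processor might observe it (the processor body is hypothetical):

    @Override
    public void processRecords(ProcessRecordsInput processRecordsInput) {
        // True only on the final batch for a shard that has been closed.
        if (processRecordsInput.isAtShardEnd()) {
            // prepare to checkpoint at shard end
        }
        for (KinesisClientRecord record : processRecordsInput.records()) {
            // handle each record
        }
    }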
@@ -30,9 +30,6 @@ import lombok.Getter;
import lombok.NonNull;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;

import org.reactivestreams.Subscription;

import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.exceptions.internal.BlockedOnParentShardException;
import software.amazon.kinesis.leases.Lease;

@@ -66,7 +63,13 @@ public class ShardConsumer {
private final ShardConsumerArgument shardConsumerArgument;
@NonNull
private final Optional<Long> logWarningForTaskAfterMillis;

/**
* @deprecated unused; to be removed in a "major" version bump
*/
@Deprecated
private final Function<ConsumerTask, ConsumerTask> taskMetricsDecorator;

private final int bufferSize;
private final TaskExecutionListener taskExecutionListener;
private final String streamIdentifier;

@@ -80,7 +83,7 @@ public class ShardConsumer {
private volatile Instant taskDispatchedAt;
private volatile boolean taskIsRunning = false;

/*
/**
* Tracks current state. It is only updated via the consumeStream/shutdown APIs. Therefore we don't do
* much coordination/synchronization to handle concurrent reads/updates.
*/

@@ -186,7 +189,6 @@ public class ShardConsumer {
}
stateChangeFuture = initializeComplete();
}

} catch (InterruptedException e) {
//
// Ignored should be handled by scheduler

@@ -206,7 +208,6 @@ public class ShardConsumer {
throw (Error) t;
}
}

}

@VisibleForTesting
@@ -61,7 +61,7 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
@Deprecated
ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,
ShardConsumer shardConsumer) {
this(recordsPublisher,executorService,bufferSize,shardConsumer, LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
this(recordsPublisher, executorService, bufferSize, shardConsumer, LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
}

ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,

@@ -74,7 +74,6 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
this.shardInfoId = ShardInfo.getLeaseKey(shardConsumer.shardInfo());
}


void startSubscriptions() {
synchronized (lockObject) {
// Setting the lastRequestTime to allow for health checks to restart subscriptions if they failed to

@@ -131,7 +130,9 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {
Duration timeSinceLastResponse = Duration.between(lastRequestTime, now);
if (timeSinceLastResponse.toMillis() > maxTimeBetweenRequests) {
log.error(
// CHECKSTYLE.OFF: LineLength
"{}: Last request was dispatched at {}, but no response as of {} ({}). Cancelling subscription, and restarting. Last successful request details -- {}",
// CHECKSTYLE.ON: LineLength
shardInfoId, lastRequestTime, now, timeSinceLastResponse, recordsPublisher.getLastSuccessfulRequestDetails());
cancel();

@@ -200,11 +201,11 @@ class ShardConsumerSubscriber implements Subscriber<RecordsRetrieved> {

protected void logOnErrorReadTimeoutWarning(Throwable t) {
log.warn("{}: onError(). Cancelling subscription, and marking self as failed. KCL will"
+ " recreate the subscription as neccessary to continue processing. If you "
+ "are seeing this warning frequently consider increasing the SDK timeouts "
+ "by providing an OverrideConfiguration to the kinesis client. Alternatively you"
+ "can configure LifecycleConfig.readTimeoutsToIgnoreBeforeWarning to suppress"
+ "intermittent ReadTimeout warnings. Last successful request details -- {}",
+ " recreate the subscription as necessary to continue processing. If you"
+ " are seeing this warning frequently consider increasing the SDK timeouts"
+ " by providing an OverrideConfiguration to the kinesis client. Alternatively you"
+ " can configure LifecycleConfig.readTimeoutsToIgnoreBeforeWarning to suppress"
+ " intermittent ReadTimeout warnings. Last successful request details -- {}",
shardInfoId, recordsPublisher.getLastSuccessfulRequestDetails(), t);
}
@@ -34,6 +34,7 @@ import software.amazon.kinesis.leases.HierarchicalShardSyncer;
import software.amazon.kinesis.leases.Lease;
import software.amazon.kinesis.leases.LeaseCleanupManager;
import software.amazon.kinesis.leases.LeaseCoordinator;
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.leases.ShardDetector;
import software.amazon.kinesis.leases.ShardInfo;
import software.amazon.kinesis.leases.UpdateField;

@@ -54,7 +55,6 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

import java.util.Random;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

/**

@@ -66,6 +66,14 @@ import java.util.stream.Collectors;
public class ShutdownTask implements ConsumerTask {
private static final String SHUTDOWN_TASK_OPERATION = "ShutdownTask";
private static final String RECORD_PROCESSOR_SHUTDOWN_METRIC = "RecordProcessor.shutdown";

/**
* Reusable, immutable {@link LeaseLostInput}.
*/
private static final LeaseLostInput LEASE_LOST_INPUT = LeaseLostInput.builder().build();

private static final Random RANDOM = new Random();

@VisibleForTesting
static final int RETRY_RANDOM_MAX_RANGE = 30;

@@ -101,8 +109,6 @@ public class ShutdownTask implements ConsumerTask {
@NonNull
private final LeaseCleanupManager leaseCleanupManager;

private static final Function<ShardInfo, String> leaseKeyProvider = shardInfo -> ShardInfo.getLeaseKey(shardInfo);

/*
* Invokes ShardRecordProcessor shutdown() API.
* (non-Javadoc)

@@ -114,74 +120,75 @@ public class ShutdownTask implements ConsumerTask {
recordProcessorCheckpointer.checkpointer().operation(SHUTDOWN_TASK_OPERATION);
final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, SHUTDOWN_TASK_OPERATION);

Exception exception;

final String leaseKey = ShardInfo.getLeaseKey(shardInfo);
try {
try {
log.debug("Invoking shutdown() for shard {} with childShards {}, concurrencyToken {}. Shutdown reason: {}",
leaseKeyProvider.apply(shardInfo), childShards, shardInfo.concurrencyToken(), reason);
leaseKey, childShards, shardInfo.concurrencyToken(), reason);

final long startTime = System.currentTimeMillis();
final Lease currentShardLease = leaseCoordinator.getCurrentlyHeldLease(leaseKeyProvider.apply(shardInfo));
final Runnable leaseLostAction = () -> shardRecordProcessor.leaseLost(LeaseLostInput.builder().build());
final Lease currentShardLease = leaseCoordinator.getCurrentlyHeldLease(leaseKey);
final Runnable leaseLostAction = () -> shardRecordProcessor.leaseLost(LEASE_LOST_INPUT);

if (reason == ShutdownReason.SHARD_END) {
try {
takeShardEndAction(currentShardLease, scope, startTime);
takeShardEndAction(currentShardLease, leaseKey, scope, startTime);
} catch (InvalidStateException e) {
// If InvalidStateException happens, it indicates we have a non recoverable error in short term.
// In this scenario, we should shutdown the shardConsumer with LEASE_LOST reason to allow other worker to take the lease and retry shutting down.
// In this scenario, we should shutdown the shardConsumer with LEASE_LOST reason to allow
// other worker to take the lease and retry shutting down.
log.warn("Lease {}: Invalid state encountered while shutting down shardConsumer with SHARD_END reason. " +
"Dropping the lease and shutting down shardConsumer using LEASE_LOST reason. ", leaseKeyProvider.apply(shardInfo), e);
dropLease(currentShardLease);
throwOnApplicationException(leaseLostAction, scope, startTime);
"Dropping the lease and shutting down shardConsumer using LEASE_LOST reason.",
leaseKey, e);
dropLease(currentShardLease, leaseKey);
throwOnApplicationException(leaseKey, leaseLostAction, scope, startTime);
}
} else {
throwOnApplicationException(leaseLostAction, scope, startTime);
throwOnApplicationException(leaseKey, leaseLostAction, scope, startTime);
}

log.debug("Shutting down retrieval strategy for shard {}.", leaseKeyProvider.apply(shardInfo));
log.debug("Shutting down retrieval strategy for shard {}.", leaseKey);
recordsPublisher.shutdown();
log.debug("Record processor completed shutdown() for shard {}", leaseKeyProvider.apply(shardInfo));
log.debug("Record processor completed shutdown() for shard {}", leaseKey);

return new TaskResult(null);
} catch (Exception e) {
if (e instanceof CustomerApplicationException) {
log.error("Shard {}: Application exception. ", leaseKeyProvider.apply(shardInfo), e);
log.error("Shard {}: Application exception.", leaseKey, e);
} else {
log.error("Shard {}: Caught exception: ", leaseKeyProvider.apply(shardInfo), e);
log.error("Shard {}: Caught exception:", leaseKey, e);
}
exception = e;
// backoff if we encounter an exception.
try {
Thread.sleep(this.backoffTimeMillis);
} catch (InterruptedException ie) {
log.debug("Shard {}: Interrupted sleep", leaseKeyProvider.apply(shardInfo), ie);
log.debug("Shard {}: Interrupted sleep", leaseKey, ie);
}

return new TaskResult(e);
}
} finally {
MetricsUtil.endScope(scope);
}

return new TaskResult(exception);
}

// Involves persisting child shard info, attempt to checkpoint and enqueueing lease for cleanup.
private void takeShardEndAction(Lease currentShardLease,
MetricsScope scope, long startTime)
final String leaseKey, MetricsScope scope, long startTime)
throws DependencyException, ProvisionedThroughputException, InvalidStateException,
CustomerApplicationException {
// Create new lease for the child shards if they don't exist.
// We have one valid scenario that shutdown task got created with SHARD_END reason and an empty list of childShards.
// This would happen when KinesisDataFetcher(for polling mode) or FanOutRecordsPublisher(for StoS mode) catches ResourceNotFound exception.
// In this case, KinesisDataFetcher and FanOutRecordsPublisher will send out SHARD_END signal to trigger a shutdown task with empty list of childShards.
// In this case, KinesisDataFetcher and FanOutRecordsPublisher will send out SHARD_END signal to trigger a
// shutdown task with empty list of childShards.
// This scenario could happen when customer deletes the stream while leaving the KCL application running.
if (currentShardLease == null) {
throw new InvalidStateException(leaseKeyProvider.apply(shardInfo)
throw new InvalidStateException(leaseKey
+ " : Lease not owned by the current worker. Leaving ShardEnd handling to new owner.");
}
if (!CollectionUtils.isNullOrEmpty(childShards)) {
createLeasesForChildShardsIfNotExist();
createLeasesForChildShardsIfNotExist(scope);
updateLeaseWithChildShards(currentShardLease);
}
final LeasePendingDeletion leasePendingDeletion = new LeasePendingDeletion(streamIdentifier, currentShardLease,

@@ -189,7 +196,7 @@ public class ShutdownTask implements ConsumerTask {
if (!leaseCleanupManager.isEnqueuedForDeletion(leasePendingDeletion)) {
boolean isSuccess = false;
try {
isSuccess = attemptShardEndCheckpointing(scope, startTime);
isSuccess = attemptShardEndCheckpointing(leaseKey, scope, startTime);
} finally {
// Check if either the shard end ddb persist is successful or
// if childshards is empty. When child shards is empty then either it is due to

@@ -202,78 +209,102 @@ public class ShutdownTask implements ConsumerTask {
}
}

private boolean attemptShardEndCheckpointing(MetricsScope scope, long startTime)
private boolean attemptShardEndCheckpointing(final String leaseKey, MetricsScope scope, long startTime)
throws DependencyException, ProvisionedThroughputException, InvalidStateException,
CustomerApplicationException {
final Lease leaseFromDdb = Optional.ofNullable(leaseCoordinator.leaseRefresher().getLease(leaseKeyProvider.apply(shardInfo)))
.orElseThrow(() -> new InvalidStateException("Lease for shard " + leaseKeyProvider.apply(shardInfo) + " does not exist."));
final Lease leaseFromDdb = Optional.ofNullable(leaseCoordinator.leaseRefresher().getLease(leaseKey))
.orElseThrow(() -> new InvalidStateException("Lease for shard " + leaseKey + " does not exist."));
if (!leaseFromDdb.checkpoint().equals(ExtendedSequenceNumber.SHARD_END)) {
// Call the shardRecordsProcessor to checkpoint with SHARD_END sequence number.
// The shardEnded is implemented by customer. We should validate if the SHARD_END checkpointing is successful after calling shardEnded.
throwOnApplicationException(() -> applicationCheckpointAndVerification(), scope, startTime);
// The shardEnded is implemented by customer. We should validate if the SHARD_END checkpointing is
// successful after calling shardEnded.
throwOnApplicationException(leaseKey, () -> applicationCheckpointAndVerification(leaseKey),
scope, startTime);
}
return true;
}

private void applicationCheckpointAndVerification() {
private void applicationCheckpointAndVerification(final String leaseKey) {
recordProcessorCheckpointer
.sequenceNumberAtShardEnd(recordProcessorCheckpointer.largestPermittedCheckpointValue());
recordProcessorCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END);
shardRecordProcessor.shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build());
final ExtendedSequenceNumber lastCheckpointValue = recordProcessorCheckpointer.lastCheckpointValue();
if (lastCheckpointValue == null
|| !lastCheckpointValue.equals(ExtendedSequenceNumber.SHARD_END)) {
if (!ExtendedSequenceNumber.SHARD_END.equals(lastCheckpointValue)) {
throw new IllegalArgumentException("Application didn't checkpoint at end of shard "
+ leaseKeyProvider.apply(shardInfo) + ". Application must checkpoint upon shard end. " +
+ leaseKey + ". Application must checkpoint upon shard end. " +
"See ShardRecordProcessor.shardEnded javadocs for more information.");
}
}

private void throwOnApplicationException(Runnable action, MetricsScope metricsScope, final long startTime) throws CustomerApplicationException {
private void throwOnApplicationException(final String leaseKey, Runnable action, MetricsScope metricsScope,
final long startTime)
throws CustomerApplicationException {
try {
action.run();
} catch (Exception e) {
throw new CustomerApplicationException("Customer application throws exception for shard " + leaseKeyProvider.apply(shardInfo) +": ", e);
throw new CustomerApplicationException("Customer application throws exception for shard " + leaseKey + ": ", e);
} finally {
MetricsUtil.addLatency(metricsScope, RECORD_PROCESSOR_SHUTDOWN_METRIC, startTime, MetricsLevel.SUMMARY);
}
}

private void createLeasesForChildShardsIfNotExist()
private void createLeasesForChildShardsIfNotExist(MetricsScope scope)
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
final LeaseRefresher leaseRefresher = leaseCoordinator.leaseRefresher();

// For child shard resulted from merge of two parent shards, verify if both the parents are either present or
// not present in the lease table before creating the lease entry.
if (!CollectionUtils.isNullOrEmpty(childShards) && childShards.size() == 1) {
if (childShards.size() == 1) {
final ChildShard childShard = childShards.get(0);
final List<String> parentLeaseKeys = childShard.parentShards().stream()
.map(parentShardId -> ShardInfo.getLeaseKey(shardInfo, parentShardId)).collect(Collectors.toList());
if (parentLeaseKeys.size() != 2) {
MetricsUtil.addCount(scope, "MissingMergeParent", 1, MetricsLevel.SUMMARY);
throw new InvalidStateException("Shard " + shardInfo.shardId() + "'s only child shard " + childShard
+ " does not contain other parent information.");
} else {
boolean isValidLeaseTableState =
Objects.isNull(leaseCoordinator.leaseRefresher().getLease(parentLeaseKeys.get(0))) == Objects
.isNull(leaseCoordinator.leaseRefresher().getLease(parentLeaseKeys.get(1)));
if (!isValidLeaseTableState) {
if (!isOneInNProbability(RETRY_RANDOM_MAX_RANGE)) {
throw new BlockedOnParentShardException(
"Shard " + shardInfo.shardId() + "'s only child shard " + childShard
+ " has partial parent information in lease table. Hence deferring lease creation of child shard.");
} else {
throw new InvalidStateException(
"Shard " + shardInfo.shardId() + "'s only child shard " + childShard
+ " has partial parent information in lease table. Hence deferring lease creation of child shard.");
}
}

final Lease parentLease0 = leaseRefresher.getLease(parentLeaseKeys.get(0));
final Lease parentLease1 = leaseRefresher.getLease(parentLeaseKeys.get(1));
if (Objects.isNull(parentLease0) != Objects.isNull(parentLease1)) {
MetricsUtil.addCount(scope, "MissingMergeParentLease", 1, MetricsLevel.SUMMARY);
final String message = "Shard " + shardInfo.shardId() + "'s only child shard " + childShard +
" has partial parent information in lease table: [parent0=" + parentLease0 +
", parent1=" + parentLease1 + "]. Hence deferring lease creation of child shard.";
if (isOneInNProbability(RETRY_RANDOM_MAX_RANGE)) {
// abort further attempts and drop the lease; lease will
// be reassigned
throw new InvalidStateException(message);
} else {
// initiate a Thread.sleep(...) and keep the lease;
// keeping the lease decreases churn of lease reassignments
throw new BlockedOnParentShardException(message);
}
}
}
// Attempt create leases for child shards.
for(ChildShard childShard : childShards) {

for (ChildShard childShard : childShards) {
final String leaseKey = ShardInfo.getLeaseKey(shardInfo, childShard.shardId());
if(leaseCoordinator.leaseRefresher().getLease(leaseKey) == null) {
if (leaseRefresher.getLease(leaseKey) == null) {
log.debug("{} - Shard {} - Attempting to create lease for child shard {}",
shardDetector.streamIdentifier(), shardInfo.shardId(), leaseKey);
final Lease leaseToCreate = hierarchicalShardSyncer.createLeaseForChildShard(childShard, shardDetector.streamIdentifier());
leaseCoordinator.leaseRefresher().createLeaseIfNotExists(leaseToCreate);
log.info("Shard {}: Created child shard lease: {}", shardInfo.shardId(), leaseToCreate.leaseKey());
final long startTime = System.currentTimeMillis();
boolean success = false;
try {
leaseRefresher.createLeaseIfNotExists(leaseToCreate);
success = true;
} finally {
MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED);
if (leaseToCreate.checkpoint() != null) {
final String metricName = leaseToCreate.checkpoint().isSentinelCheckpoint() ?
leaseToCreate.checkpoint().sequenceNumber() : "SEQUENCE_NUMBER";
MetricsUtil.addSuccess(scope, "CreateLease_" + metricName, true, MetricsLevel.DETAILED);
}
}

log.info("{} - Shard {}: Created child shard lease: {}", shardDetector.streamIdentifier(), shardInfo.shardId(), leaseToCreate);
}
}
}

@@ -283,8 +314,7 @@ public class ShutdownTask implements ConsumerTask {
*/
@VisibleForTesting
boolean isOneInNProbability(int n) {
Random r = new Random();
return 1 == r.nextInt((n - 1) + 1) + 1;
return 0 == RANDOM.nextInt(n);
}

private void updateLeaseWithChildShards(Lease currentLease)

@@ -312,10 +342,9 @@ public class ShutdownTask implements ConsumerTask {
return reason;
}

private void dropLease(Lease currentLease) {
private void dropLease(Lease currentLease, final String leaseKey) {
if (currentLease == null) {
log.warn("Shard {}: Unable to find the lease for shard. Will shutdown the shardConsumer directly.", leaseKeyProvider.apply(shardInfo));
return;
log.warn("Shard {}: Unable to find the lease for shard. Will shutdown the shardConsumer directly.", leaseKey);
} else {
leaseCoordinator.dropLease(currentLease);
log.info("Dropped lease for shutting down ShardConsumer: " + currentLease.leaseKey());
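Aside: a quick check of the isOneInNProbability rewrite above. java.util.Random#nextInt(n) is uniform over {0, ..., n-1}, so testing for 0 succeeds with probability exactly 1/n, and reusing a static instance avoids constructing (and re-seeding) a Random on every retry:

    private static final Random RANDOM = new Random();

    // Fires roughly once per n calls; with n = RETRY_RANDOM_MAX_RANGE (30),
    // about one blocked shutdown in thirty escalates to InvalidStateException.
    static boolean isOneInN(int n) {
        return 0 == RANDOM.nextInt(n);
    }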
@@ -20,9 +20,7 @@ import java.util.Objects;
import software.amazon.awssdk.services.cloudwatch.model.Dimension;
import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;



/*
/**
* A representation of a key of a MetricDatum. This class is useful when wanting to compare
* whether 2 keys have the same MetricDatum. This feature will be used in MetricAccumulatingQueue
* where we aggregate metrics across multiple MetricScopes.

@@ -48,12 +46,15 @@ public class CloudWatchMetricKey {

@Override
public boolean equals(Object obj) {
if (this == obj)
if (this == obj) {
return true;
if (obj == null)
}
if (obj == null) {
return false;
if (getClass() != obj.getClass())
}
if (getClass() != obj.getClass()) {
return false;
}
CloudWatchMetricKey other = (CloudWatchMetricKey) obj;
return Objects.equals(other.dimensions, dimensions) && Objects.equals(other.metricName, metricName);
}
@@ -15,7 +15,6 @@
package software.amazon.kinesis.metrics;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Setter;
import lombok.experimental.Accessors;
import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;

@@ -29,14 +28,13 @@ import java.util.Objects;
* @param <KeyType> is a class that stores information about a MetricDatum. This is useful
* to compare MetricDatums, aggregate similar MetricDatums or store information about a datum
* that may be relevant to the user (i.e. MetricName, CustomerId, TimeStamp, etc).
*
*
* Example:
*
*
* Let SampleMetricKey be a KeyType that takes in the time in which the datum was created.
*
*
* MetricDatumWithKey<SampleMetricKey> sampleDatumWithKey = new MetricDatumWithKey<SampleMetricKey>(new
* SampleMetricKey(System.currentTimeMillis()), datum)
*
*/
@AllArgsConstructor
@Setter

@@ -59,12 +57,15 @@ public class MetricDatumWithKey<KeyType> {

@Override
public boolean equals(Object obj) {
if (this == obj)
if (this == obj) {
return true;
if (obj == null)
}
if (obj == null) {
return false;
if (getClass() != obj.getClass())
}
if (getClass() != obj.getClass()) {
return false;
}
MetricDatumWithKey<?> other = (MetricDatumWithKey<?>) obj;
return Objects.equals(other.key, key) && Objects.equals(other.datum, datum);
}
@@ -15,43 +15,14 @@

package software.amazon.kinesis.processor;

import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamConfig;

import java.util.List;

/**
* Interface for stream trackers. This is useful for KCL Workers that need
* to consume data from multiple streams.
* KCL will periodically probe this interface to learn about the new and old streams.
* Tracker for consuming multiple Kinesis streams.
*/
public interface MultiStreamTracker {
public interface MultiStreamTracker extends StreamTracker {

/**
* Returns the list of stream config, to be processed by the current application.
* <b>Note that the streams list CAN be changed during the application runtime.</b>
* This method will be called periodically by the KCL to learn about the change in streams to process.
*
* @return List of StreamConfig
*/
List<StreamConfig> streamConfigList();

/**
* Strategy to delete leases of old streams in the lease table.
* <b>Note that the strategy CANNOT be changed during the application runtime.</b>
*
* @return StreamsLeasesDeletionStrategy
*/
FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy();

/**
* The position for getting records from an "orphaned" stream that is in the lease table but not tracked
* Default assumes that the stream no longer need to be tracked, so use LATEST for faster shard end.
*
* <p>Default value: {@link InitialPositionInStream#LATEST}</p>
*/
default InitialPositionInStreamExtended orphanedStreamInitialPositionInStream() {
return InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST);
@Override
default boolean isMultiStream() {
return true;
}

}
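Aside: with MultiStreamTracker reduced to a specialization of StreamTracker, an implementation only supplies the stream list and a lease-deletion strategy. A hedged sketch (account IDs, stream names, and creation epochs are placeholders):

    public class TwoStreamTracker implements MultiStreamTracker {
        @Override
        public List<StreamConfig> streamConfigList() {
            // createStreamConfig(...) is the default method inherited from
            // StreamTracker; the serialized identifiers are hypothetical.
            return Arrays.asList(
                    createStreamConfig(StreamIdentifier.multiStreamInstance(
                            "111122223333:stream-one:1700000000")),
                    createStreamConfig(StreamIdentifier.multiStreamInstance(
                            "111122223333:stream-two:1700000000")));
        }

        @Override
        public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() {
            // Keep leases of removed streams; other strategies delete them.
            return new FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy();
        }
    }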
@@ -15,9 +15,9 @@

package software.amazon.kinesis.processor;

import lombok.Data;
import lombok.NonNull;
import lombok.experimental.Accessors;
import lombok.Data;
import lombok.NonNull;
import lombok.experimental.Accessors;

/**
* Used by the KCL to configure the processor for processing the records.
@@ -0,0 +1,92 @@
/*
* Copyright 2023 Amazon.com, Inc. or its affiliates.
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package software.amazon.kinesis.processor;

import java.util.Collections;
import java.util.List;

import lombok.EqualsAndHashCode;
import lombok.NonNull;
import lombok.ToString;
import software.amazon.awssdk.arns.Arn;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamConfig;
import software.amazon.kinesis.common.StreamIdentifier;

/**
* Tracker for consuming a single Kinesis stream.
*/
@EqualsAndHashCode
@ToString
public class SingleStreamTracker implements StreamTracker {

/**
* By default, single-stream applications should expect the target stream
* to exist for the duration of the application. Therefore, there is no
* expectation for the leases to be deleted mid-execution.
*/
private static final FormerStreamsLeasesDeletionStrategy NO_LEASE_DELETION =
new FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy();

private final StreamIdentifier streamIdentifier;

private final List<StreamConfig> streamConfigs;

public SingleStreamTracker(String streamName) {
this(StreamIdentifier.singleStreamInstance(streamName));
}

public SingleStreamTracker(Arn streamArn) {
this(StreamIdentifier.singleStreamInstance(streamArn));
}

public SingleStreamTracker(StreamIdentifier streamIdentifier) {
this(streamIdentifier, DEFAULT_POSITION_IN_STREAM);
}

public SingleStreamTracker(
StreamIdentifier streamIdentifier,
@NonNull InitialPositionInStreamExtended initialPosition) {
this(streamIdentifier, new StreamConfig(streamIdentifier, initialPosition));
}

public SingleStreamTracker(
String streamName,
@NonNull InitialPositionInStreamExtended initialPosition) {
this(StreamIdentifier.singleStreamInstance(streamName), initialPosition);
}

public SingleStreamTracker(@NonNull StreamIdentifier streamIdentifier, @NonNull StreamConfig streamConfig) {
this.streamIdentifier = streamIdentifier;
this.streamConfigs = Collections.singletonList(streamConfig);
}

@Override
public List<StreamConfig> streamConfigList() {
return streamConfigs;
}

@Override
public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() {
return NO_LEASE_DELETION;
}

@Override
public boolean isMultiStream() {
return false;
}

}
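Aside: a usage sketch for the new class (stream name and position are placeholders); the single-argument constructors fall back to DEFAULT_POSITION_IN_STREAM, i.e. LATEST:

    StreamTracker tracker = new SingleStreamTracker(
            "my-input-stream",
            InitialPositionInStreamExtended.newInitialPosition(
                    InitialPositionInStream.TRIM_HORIZON));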
@@ -0,0 +1,85 @@
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates.
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package software.amazon.kinesis.processor;

import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamConfig;
import software.amazon.kinesis.common.StreamIdentifier;

import java.util.List;

/**
* Interface for stream trackers.
* KCL will periodically probe this interface to learn about the new and old streams.
*/
public interface StreamTracker {

/**
* Default position to begin consuming records from a Kinesis stream.
*
* @see #orphanedStreamInitialPositionInStream()
*/
InitialPositionInStreamExtended DEFAULT_POSITION_IN_STREAM =
InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST);

/**
* Returns the list of stream config, to be processed by the current application.
* <b>Note that the streams list CAN be changed during the application runtime.</b>
* This method will be called periodically by the KCL to learn about the change in streams to process.
*
* @return List of StreamConfig
*/
List<StreamConfig> streamConfigList();

/**
* Strategy to delete leases of old streams in the lease table.
* <b>Note that the strategy CANNOT be changed during the application runtime.</b>
*
* @return StreamsLeasesDeletionStrategy
*/
FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy();

/**
* The position for getting records from an "orphaned" stream that is in the lease table but not tracked
* Default assumes that the stream no longer need to be tracked, so use LATEST for faster shard end.
*
* <p>Default value: {@link InitialPositionInStream#LATEST}</p>
*/
default InitialPositionInStreamExtended orphanedStreamInitialPositionInStream() {
return DEFAULT_POSITION_IN_STREAM;
}

/**
* Returns a new {@link StreamConfig} for the provided stream identifier.
*
* @param streamIdentifier stream for which to create a new config
*/
default StreamConfig createStreamConfig(StreamIdentifier streamIdentifier) {
return new StreamConfig(streamIdentifier, orphanedStreamInitialPositionInStream());
}

/**
* Returns true if this application should accommodate the consumption of
* more than one Kinesis stream.
* <p>
* <b>This method must be consistent.</b> Varying the returned value will
* have indeterminate, and likely problematic, effects on stream processing.
* </p>
*/
boolean isMultiStream();

}
@@ -26,7 +26,9 @@ import lombok.experimental.Accessors;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

/**
*
* Traverses a {@code Throwable} class inheritance in search of a mapping
* function which will convert that throwable into a {@code RuntimeException}.
* If no mapping function is found, the default function will be applied.
*/
@KinesisClientInternalApi
public class AWSExceptionManager {
@@ -41,11 +41,42 @@ public class IteratorBuilder {
ShardIteratorType.AFTER_SEQUENCE_NUMBER);
}

/**
* Creates a GetShardIteratorRequest builder that uses AT_SEQUENCE_NUMBER ShardIteratorType.
*
* @param builder An initial GetShardIteratorRequest builder to be updated.
* @param sequenceNumber The sequence number to restart the request from.
* @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP.
* @return An updated GetShardIteratorRequest.Builder.
*/
public static GetShardIteratorRequest.Builder request(GetShardIteratorRequest.Builder builder,
String sequenceNumber, InitialPositionInStreamExtended initialPosition) {
String sequenceNumber,
InitialPositionInStreamExtended initialPosition) {
return getShardIteratorRequest(builder, sequenceNumber, initialPosition, ShardIteratorType.AT_SEQUENCE_NUMBER);

}

/**
* Creates a GetShardIteratorRequest builder that uses AFTER_SEQUENCE_NUMBER ShardIteratorType.
*
* @param builder An initial GetShardIteratorRequest builder to be updated.
* @param sequenceNumber The sequence number to restart the request from.
* @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP.
* @return An updated GetShardIteratorRequest.Builder.
*/
public static GetShardIteratorRequest.Builder reconnectRequest(GetShardIteratorRequest.Builder builder,
String sequenceNumber,
InitialPositionInStreamExtended initialPosition) {
return getShardIteratorRequest(builder, sequenceNumber, initialPosition, ShardIteratorType.AFTER_SEQUENCE_NUMBER);
}

private static GetShardIteratorRequest.Builder getShardIteratorRequest(GetShardIteratorRequest.Builder builder,
String sequenceNumber,
InitialPositionInStreamExtended initialPosition,
ShardIteratorType shardIteratorType) {
return apply(builder, GetShardIteratorRequest.Builder::shardIteratorType, GetShardIteratorRequest.Builder::timestamp,
GetShardIteratorRequest.Builder::startingSequenceNumber, initialPosition, sequenceNumber,
ShardIteratorType.AT_SEQUENCE_NUMBER);
shardIteratorType);
}

private final static Map<String, ShardIteratorType> SHARD_ITERATOR_MAPPING;
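Aside: the two public entry points above differ only in the iterator type they forward. A sketch of the distinction with raw SDK v2 calls (stream and shard names are placeholders, sequenceNumber is assumed in scope):

    // AT_SEQUENCE_NUMBER re-reads the record with the given sequence number
    // (fresh start); AFTER_SEQUENCE_NUMBER resumes just past it (reconnect).
    GetShardIteratorRequest at = GetShardIteratorRequest.builder()
            .streamName("my-stream")
            .shardId("shardId-000000000000")
            .shardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER)
            .startingSequenceNumber(sequenceNumber)
            .build();
    GetShardIteratorRequest after = at.toBuilder()
            .shardIteratorType(ShardIteratorType.AFTER_SEQUENCE_NUMBER)
            .build();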
@@ -21,15 +21,11 @@ import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.RequestDetails;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

import java.util.Optional;

/**
* Provides a record publisher that will retrieve records from Kinesis for processing
*/
public interface RecordsPublisher extends Publisher<RecordsRetrieved> {




/**
* Initializes the publisher with where to start processing. If there is a stored sequence number the publisher will
* begin from that sequence number, otherwise it will use the initial position.

@@ -47,7 +43,6 @@ public interface RecordsPublisher extends Publisher<RecordsRetrieved> {
*/
void restartFrom(RecordsRetrieved recordsRetrieved);


/**
* Shutdowns the publisher. Once this method returns the publisher should no longer provide any records.
*/
@@ -22,15 +22,18 @@ import lombok.NonNull;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.Accessors;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.common.DeprecationUtils;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.StreamConfig;
import software.amazon.kinesis.common.StreamIdentifier;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;
import software.amazon.kinesis.retrieval.fanout.FanOutConfig;
import software.amazon.kinesis.retrieval.polling.PollingConfig;

/**
 * Used by the KCL to configure the retrieval of records from Kinesis.
@@ -46,7 +49,7 @@ public class RetrievalConfig {
     */
    public static final String KINESIS_CLIENT_LIB_USER_AGENT = "amazon-kinesis-client-library-java";

    public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.4.3";
    public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.5.3-SNAPSHOT";

    /**
     * Client used to make calls to Kinesis for records retrieval
@@ -66,9 +69,17 @@ public class RetrievalConfig {

    /**
     * AppStreamTracker either for multi stream tracking or single stream
     *
     * @deprecated Both single- and multi-stream support is now provided by {@link StreamTracker}.
     * @see #streamTracker
     */
    private Either<MultiStreamTracker, StreamConfig> appStreamTracker;

    /**
     * Stream(s) to be consumed by this KCL application.
     */
    private StreamTracker streamTracker;

    /**
     * Backoff time between consecutive ListShards calls.
     *
@@ -94,7 +105,12 @@ public class RetrievalConfig {
     * <p>
     * Default value: {@link InitialPositionInStream#LATEST}
     * </p>
     *
     * @deprecated Initial stream position is now handled by {@link StreamTracker}.
     * @see StreamTracker#orphanedStreamInitialPositionInStream()
     * @see StreamTracker#createStreamConfig(StreamIdentifier)
     */
    @Deprecated
    private InitialPositionInStreamExtended initialPositionInStreamExtended = InitialPositionInStreamExtended
            .newInitialPosition(InitialPositionInStream.LATEST);

@@ -104,75 +120,75 @@ public class RetrievalConfig {

    public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull String streamName,
            @NonNull String applicationName) {
        this.kinesisClient = kinesisAsyncClient;
        this.appStreamTracker = Either
                .right(new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStreamExtended));
        this.applicationName = applicationName;
        this(kinesisAsyncClient, new SingleStreamTracker(streamName), applicationName);
    }

    public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull MultiStreamTracker multiStreamTracker,
    public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull Arn streamArn,
            @NonNull String applicationName) {
        this(kinesisAsyncClient, new SingleStreamTracker(streamArn), applicationName);
    }

    public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull StreamTracker streamTracker,
            @NonNull String applicationName) {
        this.kinesisClient = kinesisAsyncClient;
        this.appStreamTracker = Either.left(multiStreamTracker);
        this.streamTracker = streamTracker;
        this.applicationName = applicationName;
        this.appStreamTracker = DeprecationUtils.convert(streamTracker,
                singleStreamTracker -> singleStreamTracker.streamConfigList().get(0));
    }

    /**
     * Convenience method to reconfigure the embedded {@link StreamTracker},
     * but only when <b>not</b> in multi-stream mode.
     *
     * @param initialPositionInStreamExtended
     *
     * @deprecated Initial stream position is now handled by {@link StreamTracker}.
     * @see StreamTracker#orphanedStreamInitialPositionInStream()
     * @see StreamTracker#createStreamConfig(StreamIdentifier)
     */
    @Deprecated
    public RetrievalConfig initialPositionInStreamExtended(InitialPositionInStreamExtended initialPositionInStreamExtended) {
        final StreamConfig[] streamConfig = new StreamConfig[1];
        this.appStreamTracker.apply(multiStreamTracker -> {
        if (streamTracker().isMultiStream()) {
            throw new IllegalArgumentException(
                    "Cannot set initialPositionInStreamExtended when multiStreamTracker is set");
        }, sc -> streamConfig[0] = sc);
        this.appStreamTracker = Either
                .right(new StreamConfig(streamConfig[0].streamIdentifier(), initialPositionInStreamExtended));
        }

        final StreamIdentifier streamIdentifier = getSingleStreamIdentifier();
        final StreamConfig updatedConfig = new StreamConfig(streamIdentifier, initialPositionInStreamExtended);
        streamTracker = new SingleStreamTracker(streamIdentifier, updatedConfig);
        appStreamTracker = Either.right(updatedConfig);
        return this;
    }

    public RetrievalConfig retrievalSpecificConfig(RetrievalSpecificConfig retrievalSpecificConfig) {
        retrievalSpecificConfig.validateState(streamTracker.isMultiStream());
        this.retrievalSpecificConfig = retrievalSpecificConfig;
        validateFanoutConfig();
        validatePollingConfig();
        return this;
    }

    public RetrievalFactory retrievalFactory() {
        if (retrievalFactory == null) {
            if (retrievalSpecificConfig == null) {
                retrievalSpecificConfig = new FanOutConfig(kinesisClient())
                final FanOutConfig fanOutConfig = new FanOutConfig(kinesisClient())
                        .applicationName(applicationName());
                retrievalSpecificConfig = appStreamTracker.map(multiStreamTracker -> retrievalSpecificConfig,
                        streamConfig -> ((FanOutConfig) retrievalSpecificConfig).streamName(streamConfig.streamIdentifier().streamName()));
                if (!streamTracker.isMultiStream()) {
                    final String streamName = getSingleStreamIdentifier().streamName();
                    fanOutConfig.streamName(streamName);
                }
                retrievalSpecificConfig(fanOutConfig);
            }
            retrievalFactory = retrievalSpecificConfig.retrievalFactory();
        }
        return retrievalFactory;
    }

    private void validateFanoutConfig() {
        // If we are in multistream mode and if retrievalSpecificConfig is an instance of FanOutConfig and if consumerArn is set throw exception.
        boolean isFanoutConfig = retrievalSpecificConfig instanceof FanOutConfig;
        boolean isInvalidFanoutConfig = isFanoutConfig && appStreamTracker.map(
                multiStreamTracker -> ((FanOutConfig) retrievalSpecificConfig).consumerArn() != null
                        || ((FanOutConfig) retrievalSpecificConfig).streamName() != null,
                streamConfig -> streamConfig.streamIdentifier() == null
                        || streamConfig.streamIdentifier().streamName() == null);
        if(isInvalidFanoutConfig) {
            throw new IllegalArgumentException(
                    "Invalid config: Either in multi-stream mode with streamName/consumerArn configured or in single-stream mode with no streamName configured");
        }
    /**
     * Convenience method to return the {@link StreamIdentifier} from a
     * single-stream tracker.
     */
    private StreamIdentifier getSingleStreamIdentifier() {
        return streamTracker.streamConfigList().get(0).streamIdentifier();
    }

    private void validatePollingConfig() {
        boolean isPollingConfig = retrievalSpecificConfig instanceof PollingConfig;
        boolean isInvalidPollingConfig = isPollingConfig && appStreamTracker.map(
                multiStreamTracker ->
                        ((PollingConfig) retrievalSpecificConfig).streamName() != null,
                streamConfig ->
                        streamConfig.streamIdentifier() == null || streamConfig.streamIdentifier().streamName() == null);

        if (isInvalidPollingConfig) {
            throw new IllegalArgumentException(
                    "Invalid config: Either in multi-stream mode with streamName configured or in single-stream mode with no streamName configured");
        }
    }
}
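All three constructors now funnel into the StreamTracker-based one; a fragment-style sketch of each entry point (the names, the ARN, and the `tracker` variable are placeholders):

// Placeholder names/ARN throughout; `tracker` is any StreamTracker implementation.
RetrievalConfig byName = new RetrievalConfig(kinesisClient, "my-stream", "my-app");

RetrievalConfig byArn = new RetrievalConfig(kinesisClient,
        Arn.fromString("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"), "my-app");

RetrievalConfig byTracker = new RetrievalConfig(kinesisClient, tracker, "my-app");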
@@ -15,9 +15,6 @@

package software.amazon.kinesis.retrieval;

import java.util.function.Function;
import software.amazon.kinesis.retrieval.polling.DataFetcher;

public interface RetrievalSpecificConfig {
    /**
     * Creates and returns a retrieval factory for the specific configuration

@@ -25,4 +22,23 @@ public interface RetrievalSpecificConfig {
     * @return a retrieval factory that can create an appropriate retriever
     */
    RetrievalFactory retrievalFactory();

    /**
     * Validates this instance is configured properly. For example, this
     * method may validate that the stream name, if one is required, is
     * non-null.
     * <br/><br/>
     * If not in a valid state, an informative unchecked Exception -- for
     * example, an {@link IllegalArgumentException} -- should be thrown so
     * the caller may rectify the misconfiguration.
     *
     * @param isMultiStream whether state should be validated for multi-stream
     *
     * @deprecated remove keyword `default` to force implementation-specific behavior
     */
    @Deprecated
    default void validateState(boolean isMultiStream) {
        // TODO convert this to a non-default implementation in a "major" release
    }

}
@@ -80,10 +80,21 @@ public class FanOutConfig implements RetrievalSpecificConfig {
     */
    private long retryBackoffMillis = 1000;

    @Override public RetrievalFactory retrievalFactory() {
    @Override
    public RetrievalFactory retrievalFactory() {
        return new FanOutRetrievalFactory(kinesisClient, streamName, consumerArn, this::getOrCreateConsumerArn);
    }

    @Override
    public void validateState(final boolean isMultiStream) {
        if (isMultiStream) {
            if ((streamName() != null) || (consumerArn() != null)) {
                throw new IllegalArgumentException(
                        "FanOutConfig must not have streamName/consumerArn configured in multi-stream mode");
            }
        }
    }

    private String getOrCreateConsumerArn(String streamName) {
        FanOutConsumerRegistration registration = createConsumerRegistration(streamName);
        try {
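RetrievalConfig#retrievalSpecificConfig (earlier in this diff) invokes validateState with the tracker's isMultiStream() flag, so a misconfigured FanOutConfig now fails at wiring time instead of at runtime. A fragment-style sketch (names are placeholders; multiStreamTracker stands for any MultiStreamTracker implementation):

// Fails fast: validateState(true) rejects streamName/consumerArn in multi-stream mode.
new RetrievalConfig(kinesisClient, multiStreamTracker, "my-app")
        .retrievalSpecificConfig(new FanOutConfig(kinesisClient)
                .applicationName("my-app")
                .streamName("my-stream")); // throws IllegalArgumentException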
@@ -27,14 +27,12 @@ import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import software.amazon.awssdk.core.async.SdkPublisher;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.model.ChildShard;
import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponse;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;
import software.amazon.awssdk.utils.CollectionUtils;
import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;

@@ -117,7 +115,6 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            this.currentSequenceNumber = extendedSequenceNumber.sequenceNumber();
            this.isFirstConnection = true;
        }

    }

    @Override

@@ -192,7 +189,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                // Take action based on the time spent by the event in queue.
                takeDelayedDeliveryActionIfRequired(streamAndShardId, recordsRetrievedContext.getEnqueueTimestamp(), log);
                // Update current sequence number for the successfully delivered event.
                currentSequenceNumber = ((FanoutRecordsRetrieved)recordsRetrieved).continuationSequenceNumber();
                currentSequenceNumber = ((FanoutRecordsRetrieved) recordsRetrieved).continuationSequenceNumber();
                // Update the triggering flow for post scheduling upstream request.
                flowToBeReturned = recordsRetrievedContext.getRecordFlow();
                // Try scheduling the next event in the queue or execute the subscription shutdown action.

@@ -206,7 +203,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            if (flow != null && recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()
                    .equals(flow.getSubscribeToShardId())) {
                log.error(
                        "{}: Received unexpected ack for the active subscription {}. Throwing. ", streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier());
                        "{}: Received unexpected ack for the active subscription {}. Throwing.",
                        streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier());
                throw new IllegalStateException("Unexpected ack for the active subscription");
            }
            // Otherwise publisher received a stale ack.

@@ -232,8 +230,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                subscriber.onNext(recordsRetrieved);
            }
        } catch (IllegalStateException e) {
            // CHECKSTYLE.OFF: LineLength
            log.warn("{}: Unable to enqueue the payload due to capacity restrictions in delivery queue with remaining capacity {}. Last successful request details -- {}",
            // CHECKSTYLE.ON: LineLength
                    streamAndShardId, recordsDeliveryQueue.remainingCapacity(), lastSuccessfulRequestDetails);
            throw e;
        } catch (Throwable t) {

@@ -275,7 +274,6 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
        SubscriptionShutdownEvent(Runnable subscriptionShutdownAction, String eventIdentifier) {
            this(subscriptionShutdownAction, eventIdentifier, null);
        }

    }

    private boolean hasValidSubscriber() {

@@ -315,7 +313,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
        synchronized (lockObject) {

            if (!hasValidSubscriber()) {
                if(hasValidFlow()) {
                if (hasValidFlow()) {
                    log.warn(
                            "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null." +
                                    " Last successful request details -- {}", streamAndShardId, flow.connectionStartedAt,

@@ -335,7 +333,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                if (flow != null) {
                    String logMessage = String.format(
                            "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s." +
                                    " Last successful request details -- %s", streamAndShardId, flow.connectionStartedAt, flow.subscribeToShardId, category.throwableTypeString, lastSuccessfulRequestDetails);
                                    " Last successful request details -- %s", streamAndShardId, flow.connectionStartedAt,
                            flow.subscribeToShardId, category.throwableTypeString, lastSuccessfulRequestDetails);
                    switch (category.throwableType) {
                        case READ_TIMEOUT:
                            log.debug(logMessage, propagationThrowable);

@@ -367,7 +366,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
        } else {
            if (triggeringFlow != null) {
                log.debug(
                        // CHECKSTYLE.OFF: LineLength
                        "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- {} -> triggeringFlow wasn't the active flow. Didn't dispatch error",
                        // CHECKSTYLE.ON: LineLength
                        streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId,
                        category.throwableTypeString);
                triggeringFlow.cancel();

@@ -381,8 +382,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
    private void resetRecordsDeliveryStateOnSubscriptionOnInit() {
        // Clear any lingering records in the queue.
        if (!recordsDeliveryQueue.isEmpty()) {
            log.warn("{}: Found non-empty queue while starting subscription. This indicates unsuccessful clean up of"
                    + "previous subscription - {}. Last successful request details -- {}", streamAndShardId, subscribeToShardId, lastSuccessfulRequestDetails);
            log.warn("{}: Found non-empty queue while starting subscription. This indicates unsuccessful clean up of "
                    + "previous subscription - {}. Last successful request details -- {}",
                    streamAndShardId, subscribeToShardId, lastSuccessfulRequestDetails);
            recordsDeliveryQueue.clear();
        }
    }

@@ -402,7 +404,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            // The ack received for this onNext event will be ignored by the publisher as the global flow object should
            // be either null or renewed when the ack's flow identifier is evaluated.
            FanoutRecordsRetrieved response = new FanoutRecordsRetrieved(
                    ProcessRecordsInput.builder().records(Collections.emptyList()).isAtShardEnd(true).childShards(Collections.emptyList()).build(), null,
                    ProcessRecordsInput.builder().records(Collections.emptyList()).isAtShardEnd(true)
                            .childShards(Collections.emptyList()).build(), null,
                    triggeringFlow != null ? triggeringFlow.getSubscribeToShardId() : shardId + "-no-flow-found");
            subscriber.onNext(response);
            subscriber.onComplete();

@@ -515,7 +518,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
    private void updateAvailableQueueSpaceAndRequestUpstream(RecordFlow triggeringFlow) {
        if (availableQueueSpace <= 0) {
            log.debug(
                    // CHECKSTYLE.OFF: LineLength
                    "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Attempted to decrement availableQueueSpace to below 0",
                    // CHECKSTYLE.ON: LineLength
                    streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId);
        } else {
            availableQueueSpace--;

@@ -544,7 +549,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {

        if (!isActiveFlow(triggeringFlow)) {
            log.debug(
                    // CHECKSTYLE.OFF: LineLength
                    "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {} -- Received spurious onComplete from unexpected flow. Ignoring.",
                    // CHECKSTYLE.ON: LineLength
                    streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId);
            return;
        }

@@ -603,7 +610,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            synchronized (lockObject) {
                if (subscriber != s) {
                    log.warn(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: (FanOutRecordsPublisher/Subscription#request) - Rejected an attempt to request({}), because subscribers don't match. Last successful request details -- {}",
                            // CHECKSTYLE.ON: LineLength
                            streamAndShardId, n, lastSuccessfulRequestDetails);
                    return;
                }

@@ -630,13 +639,17 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            synchronized (lockObject) {
                if (subscriber != s) {
                    log.warn(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: (FanOutRecordsPublisher/Subscription#cancel) - Rejected attempt to cancel subscription, because subscribers don't match. Last successful request details -- {}",
                            // CHECKSTYLE.ON: LineLength
                            streamAndShardId, lastSuccessfulRequestDetails);
                    return;
                }
                if (!hasValidSubscriber()) {
                    log.warn(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: (FanOutRecordsPublisher/Subscription#cancel) - Cancelled called even with an invalid subscriber. Last successful request details -- {}",
                            // CHECKSTYLE.ON: LineLength
                            streamAndShardId, lastSuccessfulRequestDetails);
                }
                subscriber = null;

@@ -778,7 +791,11 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                executeExceptionOccurred(throwable);
            } else {
                final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent(
                        () -> {parent.recordsDeliveryQueue.poll(); executeExceptionOccurred(throwable);}, "onError", throwable);
                        () -> {
                            parent.recordsDeliveryQueue.poll();
                            executeExceptionOccurred(throwable);
                        },
                        "onError", throwable);
                tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent);
            }
        }

@@ -786,13 +803,14 @@ public class FanOutRecordsPublisher implements RecordsPublisher {

        private void executeExceptionOccurred(Throwable throwable) {
            synchronized (parent.lockObject) {

                log.debug("{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}",
                        parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(),
                        throwable.getMessage());
                if (this.isDisposed) {
                    log.debug(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- This flow has been disposed, not dispatching error. {}: {}",
                            // CHECKSTYLE.ON: LineLength
                            parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(),
                            throwable.getMessage());
                    this.isErrorDispatched = true;
@@ -803,7 +821,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                    isErrorDispatched = true;
                } else {
                    log.debug(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- An error has previously been dispatched, not dispatching this error {}: {}",
                            // CHECKSTYLE.ON: LineLength
                            parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(),
                            throwable.getMessage());
                }

@@ -817,7 +837,11 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                executeComplete();
            } else {
                final SubscriptionShutdownEvent subscriptionShutdownEvent = new SubscriptionShutdownEvent(
                        () -> {parent.recordsDeliveryQueue.poll(); executeComplete();}, "onComplete");
                        () -> {
                            parent.recordsDeliveryQueue.poll();
                            executeComplete();
                        },
                        "onComplete");
                tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent);
            }
        }

@@ -830,7 +854,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                        .add(new RecordsRetrievedContext(Either.right(subscriptionShutdownEvent), this, Instant.now()));
            } catch (Exception e) {
                log.warn(
                        // CHECKSTYLE.OFF: LineLength
                        "{}: Unable to enqueue the {} shutdown event due to capacity restrictions in delivery queue with remaining capacity {}. Ignoring. Last successful request details -- {}",
                        // CHECKSTYLE.ON: LineLength
                        parent.streamAndShardId, subscriptionShutdownEvent.getEventIdentifier(), parent.recordsDeliveryQueue.remainingCapacity(),
                        parent.lastSuccessfulRequestDetails, subscriptionShutdownEvent.getShutdownEventThrowableOptional());
            }

@@ -854,7 +880,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            }
            if (this.isDisposed) {
                log.warn(
                        // CHECKSTYLE.OFF: LineLength
                        "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- This flow has been disposed not dispatching completion. Last successful request details -- {}",
                        // CHECKSTYLE.ON: LineLength
                        parent.streamAndShardId, connectionStartedAt, subscribeToShardId, parent.lastSuccessfulRequestDetails);
                return;
            }

@@ -872,7 +900,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
                subscription.cancel();
            } catch (Throwable t) {
                log.error(
                        // CHECKSTYLE.OFF: LineLength
                        "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Exception while trying to cancel failed subscription: {}",
                        // CHECKSTYLE.ON: LineLength
                        parent.streamAndShardId, connectionStartedAt, subscribeToShardId, t.getMessage(), t);
            }
        }

@@ -934,12 +964,16 @@ public class FanOutRecordsPublisher implements RecordsPublisher {
            if (flow.shouldSubscriptionCancel()) {
                if (flow.isCancelled) {
                    log.debug(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Subscription was cancelled before onSubscribe",
                            // CHECKSTYLE.ON: LineLength
                            parent.streamAndShardId, connectionStartedAt, subscribeToShardId);
                }
                if (flow.isDisposed) {
                    log.debug(
                            // CHECKSTYLE.OFF: LineLength
                            "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow has been disposed cancelling subscribe",
                            // CHECKSTYLE.ON: LineLength
                            parent.streamAndShardId, connectionStartedAt, subscribeToShardId);
                }
                log.debug(
@@ -54,7 +54,7 @@ public class FanOutRetrievalFactory implements RetrievalFactory {
            final StreamConfig streamConfig,
            final MetricsFactory metricsFactory) {
        final Optional<String> streamIdentifierStr = shardInfo.streamIdentifierSerOpt();
        if(streamIdentifierStr.isPresent()) {
        if (streamIdentifierStr.isPresent()) {
            final StreamIdentifier streamIdentifier = StreamIdentifier.multiStreamInstance(streamIdentifierStr.get());
            return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(),
                    getOrCreateConsumerArn(streamIdentifier, streamConfig.consumerArn()),
@@ -15,8 +15,12 @@
package software.amazon.kinesis.retrieval.kpl;

import java.math.BigInteger;
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;

//import com.amazonaws.services.kinesis.clientlibrary.lib.worker.String;
import lombok.EqualsAndHashCode;
import software.amazon.kinesis.checkpoint.SentinelCheckpoint;

/**

@@ -28,10 +32,8 @@ import software.amazon.kinesis.checkpoint.SentinelCheckpoint;
 * user record therefore has an integer sub-sequence number, in addition to the
 * regular sequence number of the Kinesis record. The sub-sequence number is
 * used to checkpoint within an aggregated record.
 *
 * @author daphnliu
 *
 */
@EqualsAndHashCode
public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber> {
    private final String sequenceNumber;
    private final long subSequenceNumber;

@@ -65,6 +67,15 @@ public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber
    public static final ExtendedSequenceNumber AT_TIMESTAMP =
            new ExtendedSequenceNumber(SentinelCheckpoint.AT_TIMESTAMP.toString());

    /**
     * Cache of {@link SentinelCheckpoint} values that avoids expensive
     * try-catch and Exception handling.
     *
     * @see #isSentinelCheckpoint()
     */
    private static final Set<String> SENTINEL_VALUES = Collections.unmodifiableSet(
            Arrays.stream(SentinelCheckpoint.values()).map(SentinelCheckpoint::name).collect(Collectors.toSet()));

    /**
     * Construct an ExtendedSequenceNumber. The sub-sequence number defaults to
     * 0.
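With the cached name set above, sentinel detection is a plain Set lookup instead of an exception-driven SentinelCheckpoint.valueOf call; fragment-style examples (the numeric value is a placeholder):

// Constant-time sentinel detection backed by SENTINEL_VALUES; the numeric
// sequence number below is an illustrative placeholder.
new ExtendedSequenceNumber("TRIM_HORIZON").isSentinelCheckpoint();  // true
new ExtendedSequenceNumber("49590338271490256608559692538361571095").isSentinelCheckpoint();  // false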
@@ -87,7 +98,7 @@ public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber
     */
    public ExtendedSequenceNumber(String sequenceNumber, Long subSequenceNumber) {
        this.sequenceNumber = sequenceNumber;
        this.subSequenceNumber = subSequenceNumber == null ? 0 : subSequenceNumber.longValue();
        this.subSequenceNumber = subSequenceNumber == null ? 0L : subSequenceNumber;
    }

    /**

@@ -104,7 +115,7 @@ public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber
    public int compareTo(ExtendedSequenceNumber extendedSequenceNumber) {
        String secondSequenceNumber = extendedSequenceNumber.sequenceNumber();

        if (!isDigitsOrSentinelValue(sequenceNumber) || !isDigitsOrSentinelValue(secondSequenceNumber)) {
        if (!isDigitsOrSentinelValue(this) || !isDigitsOrSentinelValue(extendedSequenceNumber)) {
            throw new IllegalArgumentException("Expected a sequence number or a sentinel checkpoint value but "
                    + "received: first=" + sequenceNumber + " and second=" + secondSequenceNumber);
        }
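The reworked guard validates whole ExtendedSequenceNumbers, so digit strings and sentinel names both pass while anything else is rejected; fragment-style examples (values are illustrative):

new ExtendedSequenceNumber("100").compareTo(new ExtendedSequenceNumber("99"));  // positive: numeric ordering
new ExtendedSequenceNumber("not-a-number").compareTo(ExtendedSequenceNumber.AT_TIMESTAMP);  // throws IllegalArgumentException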
@@ -141,7 +152,6 @@ public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber
        return subSequenceNumber;
    }

    public boolean isShardEnd() {
        return sequenceNumber.equals(SentinelCheckpoint.SHARD_END.toString());
    }

@@ -149,49 +159,17 @@ public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        sb.append('{');
        if (sequenceNumber() != null) {
            sb.append("SequenceNumber: " + sequenceNumber() + ",");
            sb.append("SequenceNumber: ").append(sequenceNumber()).append(',');
        }
        if (subSequenceNumber >= 0) {
            sb.append("SubsequenceNumber: " + subSequenceNumber());
            sb.append("SubsequenceNumber: ").append(subSequenceNumber());
        }
        sb.append("}");
        sb.append('}');
        return sb.toString();
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        final int shift = 32;
        int hashCode = 1;
        hashCode = prime * hashCode + ((sequenceNumber == null) ? 0 : sequenceNumber.hashCode());
        hashCode = prime * hashCode + ((subSequenceNumber < 0)
                ? 0
                : (int) (subSequenceNumber ^ (subSequenceNumber >>> shift)));
        return hashCode;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }

        if (!(obj instanceof ExtendedSequenceNumber)) {
            return false;
        }
        ExtendedSequenceNumber other = (ExtendedSequenceNumber) obj;

        if (!sequenceNumber.equals(other.sequenceNumber())) {
            return false;
        }
        return subSequenceNumber == other.subSequenceNumber();
    }

    /**
     * Sequence numbers are converted, sentinels are given a value of -1. Note this method is only used after special
     * logic associated with SHARD_END and the case of comparing two sentinel values has already passed, so we map
@@ -217,30 +195,23 @@ public class ExtendedSequenceNumber implements Comparable<ExtendedSequenceNumber
    }

    /**
     * Checks if the string is all digits or one of the SentinelCheckpoint values.
     * Checks if a sequence number is all digits or a {@link SentinelCheckpoint}.
     *
     * @param string
     * @param esn {@code ExtendedSequenceNumber} to validate its sequence number
     * @return true if and only if the string is all digits or one of the SentinelCheckpoint values
     */
    private static boolean isDigitsOrSentinelValue(String string) {
        return isDigits(string) || isSentinelValue(string);
    private static boolean isDigitsOrSentinelValue(final ExtendedSequenceNumber esn) {
        return isDigits(esn.sequenceNumber()) || esn.isSentinelCheckpoint();
    }

    /**
     * Checks if the string is a SentinelCheckpoint value.
     *
     * @param string
     * @return true if and only if the string can be converted to a SentinelCheckpoint
     * Returns true if-and-only-if the sequence number is a {@link SentinelCheckpoint}.
     * Subsequence numbers are ignored when making this determination.
     */
    private static boolean isSentinelValue(String string) {
        try {
            SentinelCheckpoint.valueOf(string);
            return true;
        } catch (Exception e) {
            return false;
        }
    public boolean isSentinelCheckpoint() {
        return SENTINEL_VALUES.contains(sequenceNumber);
    }

    /**
     * Checks if the string is composed of only digits.
     *
@@ -28,14 +28,12 @@ import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.model.ChildShard;
import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse;
import software.amazon.awssdk.services.kinesis.model.KinesisException;
import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException;
import software.amazon.awssdk.utils.CollectionUtils;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.common.FutureUtils;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;

@@ -48,7 +46,6 @@ import software.amazon.kinesis.metrics.MetricsUtil;
import software.amazon.kinesis.retrieval.AWSExceptionManager;
import software.amazon.kinesis.retrieval.DataFetcherProviderConfig;
import software.amazon.kinesis.retrieval.DataFetcherResult;
import software.amazon.kinesis.retrieval.DataRetrievalUtil;
import software.amazon.kinesis.retrieval.IteratorBuilder;
import software.amazon.kinesis.retrieval.KinesisDataFetcherProviderConfig;
import software.amazon.kinesis.retrieval.RetryableRetrievalException;

@@ -66,6 +63,14 @@ public class KinesisDataFetcher implements DataFetcher {
    private static final String METRICS_PREFIX = "KinesisDataFetcher";
    private static final String OPERATION = "ProcessTask";

    /**
     * Reusable {@link AWSExceptionManager}.
     * <p>
     * N.B. This instance is mutable, but thread-safe for <b>read-only</b> use.
     * </p>
     */
    private static final AWSExceptionManager AWS_EXCEPTION_MANAGER = createExceptionManager();

    @NonNull
    private final KinesisAsyncClient kinesisClient;
    @NonNull @Getter

@@ -91,8 +96,6 @@ public class KinesisDataFetcher implements DataFetcher {

    /**
     * Note: This method has package level access for testing purposes.
     *
     * @return nextIterator
     */
    @Getter(AccessLevel.PACKAGE)
    private String nextIterator;

@@ -142,7 +145,9 @@ public class KinesisDataFetcher implements DataFetcher {
        }
    }

    // CHECKSTYLE.OFF: MemberName
    final DataFetcherResult TERMINAL_RESULT = new DataFetcherResult() {
    // CHECKSTYLE.ON: MemberName
        @Override
        public GetRecordsResponse getResult() {
            return GetRecordsResponse.builder()

@@ -223,16 +228,27 @@ public class KinesisDataFetcher implements DataFetcher {
    @Override
    public void advanceIteratorTo(final String sequenceNumber,
            final InitialPositionInStreamExtended initialPositionInStream) {
        advanceIteratorTo(sequenceNumber, initialPositionInStream, false);
    }

    private void advanceIteratorTo(final String sequenceNumber,
            final InitialPositionInStreamExtended initialPositionInStream,
            boolean isIteratorRestart) {
        if (sequenceNumber == null) {
            throw new IllegalArgumentException("SequenceNumber should not be null: shardId " + shardId);
        }

        final AWSExceptionManager exceptionManager = createExceptionManager();

        GetShardIteratorRequest.Builder builder = KinesisRequestsBuilder.getShardIteratorRequestBuilder()
                .streamName(streamIdentifier.streamName()).shardId(shardId);
        GetShardIteratorRequest request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream)
                .build();
        streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString()));

        GetShardIteratorRequest request;
        if (isIteratorRestart) {
            request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStream).build();
        } else {
            request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream).build();
        }
        log.debug("[GetShardIterator] Request has parameters {}", request);

        // TODO: Check if this metric is fine to be added
        final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION);

@@ -246,7 +262,7 @@ public class KinesisDataFetcher implements DataFetcher {
            nextIterator = getNextIterator(request);
            success = true;
        } catch (ExecutionException e) {
            throw exceptionManager.apply(e.getCause());
            throw AWS_EXCEPTION_MANAGER.apply(e.getCause());
        } catch (InterruptedException e) {
            // TODO: Check behavior
            throw new RuntimeException(e);

@@ -270,8 +286,8 @@ public class KinesisDataFetcher implements DataFetcher {
    }

    /**
     * Gets a new iterator from the last known sequence number i.e. the sequence number of the last record from the last
     * records call.
     * Gets a new next shard iterator from the last known sequence number, i.e. the sequence number of the last
     * record from the last records call.
     */
    @Override
    public void restartIterator() {

@@ -279,7 +295,9 @@ public class KinesisDataFetcher implements DataFetcher {
            throw new IllegalStateException(
                    "Make sure to initialize the KinesisDataFetcher before restarting the iterator.");
        }
        advanceIteratorTo(lastKnownSequenceNumber, initialPositionInStream);
        log.debug("Restarting iterator for sequence number {} on shard id {}",
                lastKnownSequenceNumber, streamAndShardId);
        advanceIteratorTo(lastKnownSequenceNumber, initialPositionInStream, true);
    }

    @Override
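restartIterator() now flags the call as a restart, which routes through IteratorBuilder.reconnectRequest and therefore AFTER_SEQUENCE_NUMBER, so the record already processed at lastKnownSequenceNumber is not re-delivered after a reconnect. A condensed restatement of the branch added above (not new behavior):

// initial positioning -> AT_SEQUENCE_NUMBER (re-read the record at sequenceNumber)
// restart after error -> AFTER_SEQUENCE_NUMBER (skip the already-processed record)
GetShardIteratorRequest request = isIteratorRestart
        ? IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStream).build()
        : IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream).build();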
@@ -302,9 +320,11 @@ public class KinesisDataFetcher implements DataFetcher {
    }

    @Override
    public GetRecordsRequest getGetRecordsRequest(String nextIterator) {
        return KinesisRequestsBuilder.getRecordsRequestBuilder().shardIterator(nextIterator)
                .limit(maxRecords).build();
    public GetRecordsRequest getGetRecordsRequest(String nextIterator) {
        GetRecordsRequest.Builder builder = KinesisRequestsBuilder.getRecordsRequestBuilder()
                .shardIterator(nextIterator).limit(maxRecords);
        streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString()));
        return builder.build();
    }

    @Override

@@ -316,7 +336,6 @@ public class KinesisDataFetcher implements DataFetcher {

    @Override
    public GetRecordsResponse getRecords(@NonNull final String nextIterator) {
        final AWSExceptionManager exceptionManager = createExceptionManager();
        GetRecordsRequest request = getGetRecordsRequest(nextIterator);

        final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION);

@@ -329,7 +348,7 @@ public class KinesisDataFetcher implements DataFetcher {
            success = true;
            return response;
        } catch (ExecutionException e) {
            throw exceptionManager.apply(e.getCause());
            throw AWS_EXCEPTION_MANAGER.apply(e.getCause());
        } catch (InterruptedException e) {
            // TODO: Check behavior
            log.debug("{} : Interrupt called on method, shutdown initiated", streamAndShardId);

@@ -343,7 +362,7 @@ public class KinesisDataFetcher implements DataFetcher {
        }
    }

    private AWSExceptionManager createExceptionManager() {
    private static AWSExceptionManager createExceptionManager() {
        final AWSExceptionManager exceptionManager = new AWSExceptionManager();
        exceptionManager.add(ResourceNotFoundException.class, t -> t);
        exceptionManager.add(KinesisException.class, t -> t);
@@ -18,7 +18,6 @@ package software.amazon.kinesis.retrieval.polling;
import java.time.Duration;
import java.util.Optional;
import java.util.function.Function;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;

@@ -47,8 +46,6 @@ public class PollingConfig implements RetrievalSpecificConfig {
    Function<DataFetcherProviderConfig, DataFetcher> dataFetcherProvider;
    /**
     * Name of the Kinesis stream.
     *
     * @return String
     */
    private String streamName;

@@ -63,8 +60,6 @@ public class PollingConfig implements RetrievalSpecificConfig {

    /**
     * Client used to access the Kinesis service.
     *
     * @return {@link KinesisAsyncClient}
     */
    @NonNull
    private final KinesisAsyncClient kinesisClient;

@@ -142,10 +137,20 @@ public class PollingConfig implements RetrievalSpecificConfig {
    @Override
    public RetrievalFactory retrievalFactory() {
        // Prioritize the PollingConfig specified value if it's updated.
        if(usePollingConfigIdleTimeValue) {
        if (usePollingConfigIdleTimeValue) {
            recordsFetcherFactory.idleMillisBetweenCalls(idleTimeBetweenReadsInMillis);
        }
        return new SynchronousBlockingRetrievalFactory(streamName(), kinesisClient(), recordsFetcherFactory,
                maxRecords(), kinesisRequestTimeout, dataFetcherProvider);
    }

    @Override
    public void validateState(final boolean isMultiStream) {
        if (isMultiStream) {
            if (streamName() != null) {
                throw new IllegalArgumentException(
                        "PollingConfig must not have streamName configured in multi-stream mode");
            }
        }
    }
}
@@ -70,7 +70,7 @@ import static software.amazon.kinesis.common.DiagnosticUtils.takeDelayedDelivery
 * i.e. the byte size of the records stored in the cache and maxRecordsCount i.e. the max number of records that should
 * be present in the cache across multiple GetRecordsResult object. If no data is available in the cache, the call from
 * the record processor is blocked till records are retrieved from Kinesis.
 *
 * <br/><br/>
 * There are three threads namely publisher, demand-notifier and ack-notifier which will contend to drain the events
 * to the Subscriber (ShardConsumer in KCL).
 */

@@ -81,9 +81,9 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
    // Since this package is being used by all KCL clients keeping the upper threshold of 60 seconds
    private static final long DEFAULT_AWAIT_TERMINATION_TIMEOUT_MILLIS = 60_000L;

    private int maxPendingProcessRecordsInput;
    private int maxByteSize;
    private int maxRecordsCount;
    private final int maxPendingProcessRecordsInput;
    private final int maxByteSize;
    private final int maxRecordsCount;
    private final int maxRecordsPerCall;
    private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy;
    private final ExecutorService executorService;

@@ -327,7 +327,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
        }
        resetLock.writeLock().lock();
        try {
            publisherSession.reset((PrefetchRecordsRetrieved)recordsRetrieved);
            publisherSession.reset((PrefetchRecordsRetrieved) recordsRetrieved);
            wasReset = true;
        } finally {
            resetLock.writeLock().unlock();

@@ -447,7 +447,6 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {

    }

    private class DefaultGetRecordsCacheDaemon implements Runnable {
        volatile boolean isShutdown = false;

@@ -483,7 +482,6 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
            MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, operation);
            if (publisherSession.prefetchCounters().shouldGetNewRecords()) {
                try {
                    sleepBeforeNextCall();
                    GetRecordsResponse getRecordsResult = getRecordsRetrievalStrategy.getRecords(maxRecordsPerCall);
                    lastSuccessfulCall = Instant.now();

@@ -502,6 +500,8 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
                            calculateHighestSequenceNumber(processRecordsInput), getRecordsResult.nextShardIterator(),
                            PrefetchRecordsRetrieved.generateBatchUniqueIdentifier());
                    publisherSession.highestSequenceNumber(recordsRetrieved.lastBatchSequenceNumber);
                    log.debug("Last sequence number retrieved for streamAndShardId {} is {}", streamAndShardId,
                            recordsRetrieved.lastBatchSequenceNumber);
                    addArrivedRecordsInput(recordsRetrieved);
                    drainQueueForRequests();
                } catch (PositionResetException pse) {

@@ -555,7 +555,7 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
                return;
            }
            // Add a sleep if lastSuccessfulCall is still null but this is not the first try to avoid retry storm
            if(lastSuccessfulCall == null) {
            if (lastSuccessfulCall == null) {
                Thread.sleep(idleMillisBetweenCalls);
                return;
            }

@@ -563,6 +563,9 @@ public class PrefetchRecordsPublisher implements RecordsPublisher {
            if (timeSinceLastCall < idleMillisBetweenCalls) {
                Thread.sleep(idleMillisBetweenCalls - timeSinceLastCall);
            }

            // avoid immediate-retry storms
            lastSuccessfulCall = null;
        }
    }
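Taken together, the two hunks above give sleepBeforeNextCall() this shape: sleep a full idle interval when there is no recorded success (and this is not the very first attempt), otherwise top up to the idle interval, then clear the timestamp so an immediate retry cannot skip the pause. A simplified restatement (the Duration-based elapsed-time line and the omitted first-call early return are assumptions; only the lines in the hunks are confirmed):

// Simplified restatement of the pacing logic; field names come from the hunks
// above, the elapsed-time computation is assumed.
private void sleepBeforeNextCall() throws InterruptedException {
    if (lastSuccessfulCall == null) {
        // Not the first attempt: back off a full idle interval to avoid a retry storm.
        Thread.sleep(idleMillisBetweenCalls);
        return;
    }
    long timeSinceLastCall = Duration.between(lastSuccessfulCall, Instant.now()).toMillis();
    if (timeSinceLastCall < idleMillisBetweenCalls) {
        Thread.sleep(idleMillisBetweenCalls - timeSinceLastCall);
    }
    // avoid immediate-retry storms
    lastSuccessfulCall = null;
}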
@@ -14,7 +14,6 @@
 */
package software.amazon.kinesis.retrieval.polling;

import java.util.Optional;
import lombok.Data;
import lombok.NonNull;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
@ -0,0 +1,304 @@
|
|||
package software.amazon.kinesis.application;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import lombok.Data;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.commons.lang3.RandomStringUtils;
|
||||
import software.amazon.awssdk.core.SdkBytes;
|
||||
import software.amazon.awssdk.regions.Region;
|
||||
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
|
||||
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
|
||||
import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest;
|
||||
import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse;
|
||||
import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
|
||||
import software.amazon.awssdk.services.kinesis.model.ScalingType;
|
||||
import software.amazon.awssdk.services.kinesis.model.UpdateShardCountRequest;
|
||||
import software.amazon.awssdk.services.kinesis.model.UpdateShardCountResponse;
|
||||
import software.amazon.kinesis.checkpoint.CheckpointConfig;
|
||||
import software.amazon.kinesis.common.ConfigsBuilder;
|
||||
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
|
||||
import software.amazon.kinesis.config.KCLAppConfig;
|
||||
import software.amazon.kinesis.coordinator.CoordinatorConfig;
|
||||
import software.amazon.kinesis.coordinator.Scheduler;
|
||||
import software.amazon.kinesis.leases.LeaseManagementConfig;
|
||||
import software.amazon.kinesis.lifecycle.LifecycleConfig;
|
||||
import software.amazon.kinesis.metrics.MetricsConfig;
|
||||
import software.amazon.kinesis.processor.ProcessorConfig;
|
||||
import software.amazon.kinesis.retrieval.RetrievalConfig;
|
||||
import software.amazon.kinesis.utils.LeaseTableManager;
|
||||
import software.amazon.kinesis.utils.RecordValidationStatus;
|
||||
import software.amazon.kinesis.utils.ReshardOptions;
|
||||
import software.amazon.kinesis.utils.StreamExistenceManager;
|
||||
|
||||
import java.math.BigInteger;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
@Slf4j
|
||||
public class TestConsumer {
|
||||
public final KCLAppConfig consumerConfig;
|
||||
public final Region region;
|
||||
public final String streamName;
|
||||
public final KinesisAsyncClient kinesisClient;
|
||||
private MetricsConfig metricsConfig;
|
||||
private RetrievalConfig retrievalConfig;
|
||||
private CheckpointConfig checkpointConfig;
|
||||
private CoordinatorConfig coordinatorConfig;
|
||||
private LeaseManagementConfig leaseManagementConfig;
|
||||
private LifecycleConfig lifecycleConfig;
|
||||
private ProcessorConfig processorConfig;
|
||||
private Scheduler scheduler;
|
||||
private ScheduledExecutorService producerExecutor;
|
||||
private ScheduledFuture<?> producerFuture;
|
||||
private ScheduledExecutorService consumerExecutor;
|
||||
private ScheduledFuture<?> consumerFuture;
|
||||
private DynamoDbAsyncClient dynamoClient;
|
||||
private final ObjectMapper mapper = new ObjectMapper();
|
||||
public int successfulPutRecords = 0;
|
||||
public BigInteger payloadCounter = new BigInteger("0");
|
||||
|
||||
public TestConsumer(KCLAppConfig consumerConfig) throws Exception {
|
||||
this.consumerConfig = consumerConfig;
|
||||
this.region = consumerConfig.getRegion();
|
||||
this.streamName = consumerConfig.getStreamName();
|
||||
this.kinesisClient = consumerConfig.buildAsyncKinesisClient();
|
||||
this.dynamoClient = consumerConfig.buildAsyncDynamoDbClient();
|
||||
}
|
||||
|
||||
public void run() throws Exception {
|
||||
|
||||
final StreamExistenceManager streamExistenceManager = new StreamExistenceManager(this.consumerConfig);
|
||||
final LeaseTableManager leaseTableManager = new LeaseTableManager(this.dynamoClient);
|
||||
|
||||
// Clean up any old streams or lease tables left in test environment
|
||||
cleanTestResources(streamExistenceManager, leaseTableManager);
|
||||
|
||||
// Check if stream is created. If not, create it
|
||||
streamExistenceManager.checkStreamAndCreateIfNecessary(this.streamName);
|
||||
|
||||
startProducer();
|
||||
setUpConsumerResources();
|
||||
|
||||
try {
|
||||
startConsumer();
|
||||
|
||||
// Sleep to allow the producer/consumer to run and then end the test case.
|
||||
// If non-reshard sleep 3 minutes, else sleep 4 minutes per scale.
|
||||
final int sleepMinutes = (consumerConfig.getReshardFactorList() == null) ? 3 : (4 * consumerConfig.getReshardFactorList().size());
|
||||
Thread.sleep(TimeUnit.MINUTES.toMillis(sleepMinutes));
|
||||
|
||||
// Stops sending dummy data.
|
||||
stopProducer();
|
||||
|
||||
// Wait a few seconds for the last few records to be processed
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(10));
|
||||
|
||||
// Finishes processing current batch of data already received from Kinesis before shutting down.
|
||||
awaitConsumerFinish();
|
||||
|
||||
// Validate processed data
|
||||
validateRecordProcessor();
|
||||
|
||||
} catch (Exception e) {
|
||||
// Test Failed. Clean up resources and then throw exception.
|
||||
log.info("----------Test Failed: Cleaning up resources------------");
|
||||
throw e;
|
||||
} finally {
|
||||
// Clean up resources created
|
||||
deleteResources(streamExistenceManager, leaseTableManager);
|
||||
}
|
||||
}
|
||||
|
||||
private void cleanTestResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) throws Exception {
|
||||
log.info("----------Before starting, Cleaning test environment----------");
|
||||
log.info("----------Deleting all lease tables in account----------");
|
||||
leaseTableManager.deleteAllResource();
|
||||
log.info("----------Finished deleting all lease tables-------------");
|
||||
|
||||
log.info("----------Deleting all streams in account----------");
|
||||
streamExistenceManager.deleteAllResource();
|
||||
log.info("----------Finished deleting all streams-------------");
|
||||
}
|
||||
|
||||
private void startProducer() {
|
||||
this.producerExecutor = Executors.newSingleThreadScheduledExecutor();
|
||||
this.producerFuture = producerExecutor.scheduleAtFixedRate(this::publishRecord, 10, 1, TimeUnit.SECONDS);
|
||||
|
||||
// Reshard logic if required for the test
|
||||
if (consumerConfig.getReshardFactorList() != null) {
|
||||
log.info("----Reshard Config found: {}", consumerConfig.getReshardFactorList());
|
||||
|
||||
final StreamScaler s = new StreamScaler(
|
||||
kinesisClient,
|
||||
consumerConfig.getStreamName(),
|
||||
consumerConfig.getReshardFactorList(),
|
||||
consumerConfig
|
||||
);
|
||||
|
||||
// Schedule the stream scales 4 minutes apart with 2 minute starting delay
|
||||
for (int i = 0; i < consumerConfig.getReshardFactorList().size(); i++) {
|
||||
producerExecutor.schedule(s, (4 * i) + 2, TimeUnit.MINUTES);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void setUpConsumerResources() throws Exception {
|
||||
// Setup configuration of KCL (including DynamoDB and CloudWatch)
|
||||
final ConfigsBuilder configsBuilder = consumerConfig.getConfigsBuilder();
|
||||
|
||||
retrievalConfig = consumerConfig.getRetrievalConfig();
|
||||
checkpointConfig = configsBuilder.checkpointConfig();
|
||||
coordinatorConfig = configsBuilder.coordinatorConfig();
|
||||
leaseManagementConfig = configsBuilder.leaseManagementConfig()
|
||||
.initialPositionInStream(
|
||||
InitialPositionInStreamExtended.newInitialPosition(consumerConfig.getInitialPosition())
|
||||
)
|
||||
.initialLeaseTableReadCapacity(50).initialLeaseTableWriteCapacity(50);
|
||||
lifecycleConfig = configsBuilder.lifecycleConfig();
|
||||
processorConfig = configsBuilder.processorConfig();
|
||||
metricsConfig = configsBuilder.metricsConfig();
|
||||
|
||||
// Create Scheduler
|
||||
this.scheduler = new Scheduler(
|
||||
checkpointConfig,
|
||||
coordinatorConfig,
|
||||
leaseManagementConfig,
|
||||
lifecycleConfig,
|
||||
metricsConfig,
|
||||
processorConfig,
|
||||
retrievalConfig
|
||||
);
|
||||
}
|
||||
|
||||
    private void startConsumer() {
        // Start record processing of dummy data
        this.consumerExecutor = Executors.newSingleThreadScheduledExecutor();
        this.consumerFuture = consumerExecutor.schedule(scheduler, 0, TimeUnit.SECONDS);
    }

    private void stopProducer() {
        log.info("Cancelling producer and shutting down executor.");
        if (producerFuture != null) {
            producerFuture.cancel(false);
        }
        if (producerExecutor != null) {
            producerExecutor.shutdown();
        }
    }

    public void publishRecord() {
        final PutRecordRequest request;
        try {
            request = PutRecordRequest.builder()
                    .partitionKey(RandomStringUtils.randomAlphabetic(5, 20))
                    .streamName(this.streamName)
                    .data(SdkBytes.fromByteBuffer(wrapWithCounter(5, payloadCounter)))
                    .build();
            kinesisClient.putRecord(request).get();

            // Increment the payload counter if the putRecord call was successful
            payloadCounter = payloadCounter.add(new BigInteger("1"));
            successfulPutRecords += 1;
            log.info("---------Record published, successfulPutRecords is now: {}", successfulPutRecords);
        } catch (InterruptedException e) {
            log.info("Interrupted, assuming shutdown. ", e);
        } catch (ExecutionException | RuntimeException e) {
            log.error("Error during publish records", e);
        }
    }

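Since the producer serializes the BigInteger counter with Jackson (see wrapWithCounter below), a consumer-side decode is the mirror image. A minimal sketch, using only standard Jackson calls (the class and method names are illustrative):

```java
import java.io.IOException;
import java.math.BigInteger;
import java.nio.ByteBuffer;

import com.fasterxml.jackson.databind.ObjectMapper;

// Mirror of wrapWithCounter (sketch): recover the BigInteger counter from a record payload.
public final class CounterCodecSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    public static BigInteger readCounter(ByteBuffer payload) throws IOException {
        byte[] bytes = new byte[payload.remaining()];
        payload.get(bytes);
        return MAPPER.readValue(bytes, BigInteger.class);
    }
}
```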
    private ByteBuffer wrapWithCounter(int payloadSize, BigInteger payloadCounter) throws RuntimeException {
        final byte[] returnData;
        log.info("---------Putting record with data: {}", payloadCounter);
        try {
            returnData = mapper.writeValueAsBytes(payloadCounter);
        } catch (Exception e) {
            throw new RuntimeException("Error converting object to bytes: ", e);
        }
        return ByteBuffer.wrap(returnData);
    }

    private void awaitConsumerFinish() throws Exception {
        Future<Boolean> gracefulShutdownFuture = scheduler.startGracefulShutdown();
        log.info("Waiting up to 20 seconds for shutdown to complete.");
        try {
            gracefulShutdownFuture.get(20, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            log.info("Interrupted while waiting for graceful shutdown. Continuing.");
        } catch (ExecutionException | TimeoutException e) {
            scheduler.shutdown();
        }
        log.info("Completed, shutting down now.");
    }

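In a long-running service, as opposed to this test, the same graceful-shutdown call is typically wired to a JVM shutdown hook. A sketch, assuming a started `scheduler` instance is in scope:

```java
// Hypothetical wiring; 'scheduler' is an already-started Scheduler instance.
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    try {
        scheduler.startGracefulShutdown().get(20, java.util.concurrent.TimeUnit.SECONDS);
    } catch (Exception e) {
        scheduler.shutdown(); // fall back to a hard stop
    }
}));
```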
    private void validateRecordProcessor() throws Exception {
        log.info("The number of expected records is: {}", successfulPutRecords);
        final RecordValidationStatus errorVal = consumerConfig.getRecordValidator().validateRecords(successfulPutRecords);
        if (errorVal != RecordValidationStatus.NO_ERROR) {
            throw new RuntimeException("There was an error validating the records that were processed: " + errorVal.toString());
        }
        log.info("---------Completed validation of processed records.---------");
    }

    private void deleteResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) throws Exception {
        log.info("-------------Start deleting stream.---------");
        streamExistenceManager.deleteResource(this.streamName);
        log.info("---------Start deleting lease table.---------");
        leaseTableManager.deleteResource(this.consumerConfig.getStreamName());
        log.info("---------Finished deleting resources.---------");
    }

    @Data
    private static class StreamScaler implements Runnable {
        private final KinesisAsyncClient client;
        private final String streamName;
        private final List<ReshardOptions> scalingFactors;
        private final KCLAppConfig consumerConfig;
        private int scalingFactorIdx = 0;
        private DescribeStreamSummaryRequest describeStreamSummaryRequest;

        private synchronized void scaleStream() throws InterruptedException, ExecutionException {
            final DescribeStreamSummaryResponse response = client.describeStreamSummary(describeStreamSummaryRequest).get();

            final int openShardCount = response.streamDescriptionSummary().openShardCount();
            final int targetShardCount = scalingFactors.get(scalingFactorIdx).calculateShardCount(openShardCount);

            log.info("Scaling stream {} from {} shards to {} shards w/ scaling factor {}",
                    streamName, openShardCount, targetShardCount, scalingFactors.get(scalingFactorIdx));

            final UpdateShardCountRequest updateShardCountRequest = UpdateShardCountRequest.builder()
                    .streamName(streamName).targetShardCount(targetShardCount).scalingType(ScalingType.UNIFORM_SCALING).build();
            final UpdateShardCountResponse shardCountResponse = client.updateShardCount(updateShardCountRequest).get();
            log.info("Executed shard scaling request. Response Details : {}", shardCountResponse.toString());

            scalingFactorIdx++;
        }

        @Override
        public void run() {
            if (scalingFactors.size() == 0 || scalingFactorIdx >= scalingFactors.size()) {
                log.info("No scaling factor found in list");
                return;
            }
            log.info("Starting stream scaling with params : {}", this);

            if (describeStreamSummaryRequest == null) {
                describeStreamSummaryRequest = DescribeStreamSummaryRequest.builder().streamName(streamName).build();
            }
            try {
                scaleStream();
            } catch (InterruptedException | ExecutionException e) {
                log.error("Caught error while scaling shards for stream", e);
            } finally {
                log.info("Reshard List State : {}", scalingFactors);
            }
        }
    }
}

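ReshardOptions itself is a test utility that does not appear in this excerpt; as used above, its contract is just a mapping from the current open-shard count to a target count. A hedged sketch of that shape (the method name mirrors the diff, the constant names and bodies are assumptions):

```java
// Sketch only: the real ReshardOptions enum lives elsewhere in the test utilities.
enum ReshardOptionsSketch {
    SPLIT { public int calculateShardCount(int openShardCount) { return openShardCount * 2; } },
    MERGE { public int calculateShardCount(int openShardCount) { return Math.max(1, openShardCount / 2); } };

    public abstract int calculateShardCount(int openShardCount);
}
```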
@ -0,0 +1,109 @@
package software.amazon.kinesis.application;

import lombok.extern.slf4j.Slf4j;
import org.slf4j.MDC;
import software.amazon.kinesis.exceptions.InvalidStateException;
import software.amazon.kinesis.exceptions.ShutdownException;
import software.amazon.kinesis.lifecycle.events.InitializationInput;
import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput;
import software.amazon.kinesis.processor.ShardRecordProcessor;
import software.amazon.kinesis.retrieval.KinesisClientRecord;
import software.amazon.kinesis.utils.RecordValidatorQueue;

import java.nio.ByteBuffer;

/**
 * Implements shard initialization, record processing, and shard lifecycle handling
 * for the integration tests.
 */
@Slf4j
public class TestRecordProcessor implements ShardRecordProcessor {

    private static final String SHARD_ID_MDC_KEY = "ShardId";

    private String shardId;

    private final RecordValidatorQueue recordValidator;

    public TestRecordProcessor(RecordValidatorQueue recordValidator) {
        this.recordValidator = recordValidator;
    }

    @Override
    public void initialize(InitializationInput initializationInput) {
        shardId = initializationInput.shardId();
        MDC.put(SHARD_ID_MDC_KEY, shardId);
        try {
            log.info("Initializing @ Sequence: {}", initializationInput.extendedSequenceNumber());
        } finally {
            MDC.remove(SHARD_ID_MDC_KEY);
        }
    }

    @Override
    public void processRecords(ProcessRecordsInput processRecordsInput) {
        MDC.put(SHARD_ID_MDC_KEY, shardId);
        try {
            log.info("Processing {} record(s)", processRecordsInput.records().size());

            for (KinesisClientRecord kinesisRecord : processRecordsInput.records()) {
                final String data = new String(asByteArray(kinesisRecord.data()));
                log.info("Processing record pk: {}", data);
                recordValidator.add(shardId, data);
            }
        } catch (Throwable t) {
            log.error("Caught throwable while processing records. Aborting.", t);
            Runtime.getRuntime().halt(1);
        } finally {
            MDC.remove(SHARD_ID_MDC_KEY);
        }
    }

    public static byte[] asByteArray(ByteBuffer buf) {
        byte[] bytes = new byte[buf.remaining()];
        buf.get(bytes);
        return bytes;
    }

    @Override
    public void leaseLost(LeaseLostInput leaseLostInput) {
        MDC.put(SHARD_ID_MDC_KEY, shardId);
        try {
            log.info("Lost lease, so terminating.");
        } finally {
            MDC.remove(SHARD_ID_MDC_KEY);
        }
    }

    @Override
    public void shardEnded(ShardEndedInput shardEndedInput) {
        MDC.put(SHARD_ID_MDC_KEY, shardId);
        try {
            log.info("Reached shard end; checkpointing.");
            shardEndedInput.checkpointer().checkpoint();
        } catch (ShutdownException | InvalidStateException e) {
            log.error("Exception while checkpointing at shard end. Giving up.", e);
        } finally {
            MDC.remove(SHARD_ID_MDC_KEY);
        }
    }

    @Override
    public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) {
        MDC.put(SHARD_ID_MDC_KEY, shardId);
        try {
            log.info("Scheduler is shutting down, checkpointing.");
            shutdownRequestedInput.checkpointer().checkpoint();
        } catch (ShutdownException | InvalidStateException e) {
            log.error("Exception while checkpointing at requested shutdown. Giving up.", e);
        } finally {
            MDC.remove(SHARD_ID_MDC_KEY);
        }
    }
}

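Note that this test processor checkpoints only at shard end and at requested shutdown; production processors usually also checkpoint inside processRecords. A hedged sketch of that pattern (the `handle` helper is hypothetical, and the catch clause mirrors the one used in this diff):

```java
// Sketch: periodic checkpointing inside processRecords (not part of the diff).
@Override
public void processRecords(ProcessRecordsInput input) {
    for (KinesisClientRecord record : input.records()) {
        handle(record); // hypothetical per-record handler
    }
    try {
        input.checkpointer().checkpoint(); // persist progress after the batch
    } catch (ShutdownException | InvalidStateException e) {
        log.error("Checkpoint failed", e);
    }
}
```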
@ -0,0 +1,20 @@
package software.amazon.kinesis.application;

import software.amazon.kinesis.processor.ShardRecordProcessor;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.utils.RecordValidatorQueue;

public class TestRecordProcessorFactory implements ShardRecordProcessorFactory {

    private final RecordValidatorQueue recordValidator;

    public TestRecordProcessorFactory(RecordValidatorQueue recordValidator) {
        this.recordValidator = recordValidator;
    }

    @Override
    public ShardRecordProcessor shardRecordProcessor() {
        return new TestRecordProcessor(this.recordValidator);
    }
}

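Since ShardRecordProcessorFactory exposes a single abstract method, the same factory can also be written as a lambda; a sketch (the no-arg `RecordValidatorQueue` constructor is assumed):

```java
// Equivalent lambda form (sketch) of the factory above.
RecordValidatorQueue validator = new RecordValidatorQueue(); // assumed no-arg constructor
ShardRecordProcessorFactory factory = () -> new TestRecordProcessor(validator);
```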
@ -37,9 +37,9 @@ public class CheckpointerTest {
    @Test
    public final void testInitialSetCheckpoint() throws Exception {
        String sequenceNumber = "1";
        String shardId = "myShardId";
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber);
        checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken);
        ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId);
        Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint);

@ -49,8 +49,8 @@ public class CheckpointerTest {
    public final void testAdvancingSetCheckpoint() throws Exception {
        String shardId = "myShardId";
        for (Integer i = 0; i < 10; i++) {
            String sequenceNumber = i.toString();
            ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber);
            checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken);
            ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId);
            Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint);

@ -67,7 +67,7 @@ public class CheckpointerTest {
        String checkpointValue = "12345";
        String shardId = "testShardId-1";
        String concurrencyToken = "token-1";
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue);
        checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken);
        Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId));
        Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).checkpoint());

@ -17,7 +17,6 @@ package software.amazon.kinesis.checkpoint;
import java.util.HashMap;
import java.util.Map;

import software.amazon.kinesis.exceptions.KinesisClientLibException;
import software.amazon.kinesis.processor.Checkpointer;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

@ -39,8 +38,7 @@ public class InMemoryCheckpointer implements Checkpointer {
     * {@inheritDoc}
     */
    @Override
    public void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken)
            throws KinesisClientLibException {
    public void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken) {
        checkpoints.put(leaseKey, checkpointValue);
        flushpoints.put(leaseKey, checkpointValue);
        pendingCheckpoints.remove(leaseKey);

@ -49,33 +47,32 @@ public class InMemoryCheckpointer implements Checkpointer {
        if (log.isDebugEnabled()) {
            log.debug("shardId: {} checkpoint: {}", leaseKey, checkpointValue);
        }

    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ExtendedSequenceNumber getCheckpoint(String leaseKey) throws KinesisClientLibException {
    public ExtendedSequenceNumber getCheckpoint(String leaseKey) {
        ExtendedSequenceNumber checkpoint = flushpoints.get(leaseKey);
        log.debug("checkpoint shardId: {} checkpoint: {}", leaseKey, checkpoint);
        return checkpoint;
    }

    @Override
    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken)
            throws KinesisClientLibException {
    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) {
        prepareCheckpoint(leaseKey, pendingCheckpoint, concurrencyToken, null);
    }

    @Override
    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState) throws KinesisClientLibException {
    public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken,
            byte[] pendingCheckpointState) {
        pendingCheckpoints.put(leaseKey, pendingCheckpoint);
        pendingCheckpointStates.put(leaseKey, pendingCheckpointState);
    }

    @Override
    public Checkpoint getCheckpointObject(String leaseKey) throws KinesisClientLibException {
    public Checkpoint getCheckpointObject(String leaseKey) {
        ExtendedSequenceNumber checkpoint = flushpoints.get(leaseKey);
        ExtendedSequenceNumber pendingCheckpoint = pendingCheckpoints.get(leaseKey);
        byte[] pendingCheckpointState = pendingCheckpointStates.get(leaseKey);

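The prepare/set pair above implements two-phase checkpointing: prepareCheckpoint stages a pending position, and a later setCheckpoint commits it and clears the pending entry. A minimal usage sketch against this in-memory implementation (lease key and concurrency token are placeholders):

```java
import software.amazon.kinesis.checkpoint.InMemoryCheckpointer;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

// Sketch of the two-phase flow; values are illustrative.
public class TwoPhaseCheckpointSketch {
    public static void main(String[] args) throws Exception {
        InMemoryCheckpointer cp = new InMemoryCheckpointer();
        ExtendedSequenceNumber seq = new ExtendedSequenceNumber("100");
        cp.prepareCheckpoint("lease-1", seq, "token-1"); // phase 1: stage the pending checkpoint
        cp.setCheckpoint("lease-1", seq, "token-1");     // phase 2: commit; clears the pending entry
        System.out.println(cp.getCheckpoint("lease-1")); // prints the committed sequence number
    }
}
```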
@ -91,11 +91,11 @@ public class ShardShardRecordProcessorCheckpointerTest {
     */
    @Test
    public final void testCheckpointRecord() throws Exception {
        ShardRecordProcessorCheckpointer processingCheckpointer =
                new ShardRecordProcessorCheckpointer(shardInfo, checkpoint);
        processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025");
        Record record = makeRecord("5025");
        processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber);
        processingCheckpointer.checkpoint(record);
        assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber));

@ -107,13 +107,13 @@ public class ShardShardRecordProcessorCheckpointerTest {
     */
    @Test
    public final void testCheckpointSubRecord() throws Exception {
        ShardRecordProcessorCheckpointer processingCheckpointer =
                new ShardRecordProcessorCheckpointer(shardInfo, checkpoint);
        processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030");
        Record record = makeRecord("5030");
        // UserRecord subRecord = new UserRecord(record);
        processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber);
        processingCheckpointer.checkpoint(record);
        assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber));
    }

@ -124,11 +124,11 @@ public class ShardShardRecordProcessorCheckpointerTest {
     */
    @Test
    public final void testCheckpointSequenceNumber() throws Exception {
        ShardRecordProcessorCheckpointer processingCheckpointer =
                new ShardRecordProcessorCheckpointer(shardInfo, checkpoint);
        processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035");
        processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber);
        processingCheckpointer.checkpoint("5035");
        assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber));
    }

@ -139,11 +139,11 @@ public class ShardShardRecordProcessorCheckpointerTest {
     */
    @Test
    public final void testCheckpointExtendedSequenceNumber() throws Exception {
        ShardRecordProcessorCheckpointer processingCheckpointer =
                new ShardRecordProcessorCheckpointer(shardInfo, checkpoint);
        processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber);
        ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040");
        processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber);
        processingCheckpointer.checkpoint("5040", 0);
        assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber));
    }

@ -397,7 +397,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
        assertThat(checkpointer.largestPermittedCheckpointValue(), equalTo(sequenceNumber));
    }

    /*
    /**
     * This test is a mixed test of checking some basic functionality of checkpointing at a sequence number and making
     * sure certain bounds checks and validations are being performed inside the checkpointer to prevent clients from
     * checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be checkpointing

@ -444,7 +444,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
                new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string
                ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max
                ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value
                ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value
                ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value
        };
        for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) {
            try {

@ -477,7 +477,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
                processingCheckpointer.lastCheckpointValue(), equalTo(ExtendedSequenceNumber.SHARD_END));
    }

    /*
    /**
     * This test is a mixed test of checking some basic functionality of two phase checkpointing at a sequence number
     * and making sure certain bounds checks and validations are being performed inside the checkpointer to prevent
     * clients from checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be

@ -548,7 +548,7 @@ public class ShardShardRecordProcessorCheckpointerTest {
                new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string
                ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max
                ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value
                ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value
                ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value
        };
        for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) {
            try {

@ -566,7 +566,6 @@ public class ShardShardRecordProcessorCheckpointerTest {
        assertThat("Largest sequence number should not have changed",
                processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber));
        assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue());

        }

        // advance to third number

@ -601,7 +600,6 @@ public class ShardShardRecordProcessorCheckpointerTest {
     *
     * @throws Exception
     */
    @SuppressWarnings("serial")
    @Test
    public final void testMixedCheckpointCalls() throws Exception {
        for (LinkedHashMap<String, CheckpointAction> testPlan : getMixedCallsTestPlan()) {

@ -617,7 +615,6 @@ public class ShardShardRecordProcessorCheckpointerTest {
     *
     * @throws Exception
     */
    @SuppressWarnings("serial")
    @Test
    public final void testMixedTwoPhaseCheckpointCalls() throws Exception {
        for (LinkedHashMap<String, CheckpointAction> testPlan : getMixedCallsTestPlan()) {

@ -0,0 +1,110 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.Arrays;
import java.util.Optional;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Mockito.mock;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;

@RunWith(MockitoJUnitRunner.class)
public class ConfigsBuilderTest {

    @Mock
    private KinesisAsyncClient mockKinesisClient;

    @Mock
    private DynamoDbAsyncClient mockDynamoClient;

    @Mock
    private CloudWatchAsyncClient mockCloudWatchClient;

    @Mock
    private ShardRecordProcessorFactory mockShardProcessorFactory;

    private static final String APPLICATION_NAME = ConfigsBuilderTest.class.getSimpleName();
    private static final String WORKER_IDENTIFIER = "worker-id";

    @Test
    public void testSingleStreamTrackerConstruction() {
        final String streamName = "single-stream";
        final Arn streamArn = createArn(streamName);

        for (final ConfigsBuilder cb : Arrays.asList(
                createConfig(streamName),
                createConfig(new SingleStreamTracker(streamName)),
                createConfig(streamArn),
                createConfig(new SingleStreamTracker(streamArn)))) {
            assertEquals(Optional.empty(), cb.appStreamTracker().left());
            assertEquals(streamName, cb.appStreamTracker().right().get());
            assertEquals(streamName, cb.streamTracker().streamConfigList().get(0).streamIdentifier().streamName());
            assertFalse(cb.streamTracker().isMultiStream());
        }
    }

    @Test
    public void testMultiStreamTrackerConstruction() {
        final StreamTracker mockMultiStreamTracker = mock(MultiStreamTracker.class);
        final ConfigsBuilder configByMultiTracker = createConfig(mockMultiStreamTracker);
        assertEquals(Optional.empty(), configByMultiTracker.appStreamTracker().right());
        assertEquals(mockMultiStreamTracker, configByMultiTracker.appStreamTracker().left().get());
        assertEquals(mockMultiStreamTracker, configByMultiTracker.streamTracker());
    }

    private ConfigsBuilder createConfig(String streamName) {
        // intentional invocation of constructor where streamName is a String
        return new ConfigsBuilder(streamName, APPLICATION_NAME, mockKinesisClient, mockDynamoClient,
                mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory);
    }

    private ConfigsBuilder createConfig(Arn streamArn) {
        // intentional invocation of constructor where streamArn is an Arn
        return new ConfigsBuilder(streamArn, APPLICATION_NAME, mockKinesisClient, mockDynamoClient,
                mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory);
    }

    private ConfigsBuilder createConfig(StreamTracker streamTracker) {
        return new ConfigsBuilder(streamTracker, APPLICATION_NAME, mockKinesisClient, mockDynamoClient,
                mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory);
    }

    private static Arn createArn(String streamName) {
        return Arn.builder()
                .partition("aws")
                .service("kinesis")
                .region(Region.US_EAST_1.id())
                .accountId("123456789012")
                .resource("stream/" + streamName)
                .build();
    }
}

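The appStreamTracker() assertions above lean on the SDK's Either type: left holds a MultiStreamTracker, right holds a single stream name. A sketch of consuming that value (variable and method names are illustrative):

```java
import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.processor.MultiStreamTracker;

// Illustrative only: 'tracker' would come from configsBuilder.appStreamTracker().
static void describe(Either<MultiStreamTracker, String> tracker) {
    tracker.apply(
            multi -> System.out.println("multi-stream app tracking " + multi.streamConfigList().size() + " streams"),
            name -> System.out.println("single-stream app on: " + name));
}
```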
@ -0,0 +1,45 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.function.Function;

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;

import org.junit.Test;
import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;

public class DeprecationUtilsTest {

    @Test
    public void testTrackerConversion() {
        final StreamTracker mockMultiTracker = mock(MultiStreamTracker.class);
        assertEquals(Either.left(mockMultiTracker), DeprecationUtils.convert(mockMultiTracker, Function.identity()));

        final StreamTracker mockSingleTracker = mock(SingleStreamTracker.class);
        assertEquals(Either.right(mockSingleTracker), DeprecationUtils.convert(mockSingleTracker, Function.identity()));
    }

    @Test(expected = IllegalArgumentException.class)
    public void testUnsupportedStreamTrackerConversion() {
        DeprecationUtils.convert(mock(StreamTracker.class), Function.identity());
    }
}

@ -0,0 +1,14 @@
package software.amazon.kinesis.common;

import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON;

import org.junit.Test;

public class StreamConfigTest {

    @Test(expected = NullPointerException.class)
    public void testNullStreamIdentifier() {
        new StreamConfig(null, InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON));
    }
}

@ -0,0 +1,153 @@
package software.amazon.kinesis.common;

import org.junit.Assert;
import org.junit.Test;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;

import java.util.Arrays;
import java.util.Optional;

import static org.junit.Assert.assertEquals;

public class StreamIdentifierTest {
    private static final String STREAM_NAME = "stream-name";
    private static final String PARTITION = "aws";
    private static final String SERVICE = "kinesis";
    private static final Region KINESIS_REGION = Region.US_WEST_1;
    private static final String TEST_ACCOUNT_ID = "123456789012";
    private static final String RESOURCE = "stream/" + STREAM_NAME;
    private static final long EPOCH = 1680616058L;
    private static final Arn DEFAULT_ARN = createArn();

    /**
     * Test patterns that should match a serialization regex.
     */
    @Test
    public void testMultiStreamDeserializationSuccess() {
        final StreamIdentifier siSerialized = StreamIdentifier.multiStreamInstance(serialize());
        assertEquals(Optional.of(EPOCH), siSerialized.streamCreationEpochOptional());
        assertActualStreamIdentifierExpected(null, siSerialized);
    }

    /**
     * Test patterns that <b>should not</b> match a serialization regex.
     */
    @Test
    public void testMultiStreamDeserializationFail() {
        for (final String pattern : Arrays.asList(
                ":stream-name:123", // missing account id
                // "123456789:stream-name:123", // account id not 12 digits
                "123456789abc:stream-name:123", // 12char alphanumeric account id
                "123456789012::123", // missing stream name
                "123456789012:stream-name", // missing delimiter and creation epoch
                "123456789012:stream-name:", // missing creation epoch
                "123456789012:stream-name:-123", // negative creation epoch
                "123456789012:stream-name:abc", // non-numeric creation epoch
                ""
        )) {
            try {
                StreamIdentifier.multiStreamInstance(pattern);
                Assert.fail("Serialization " + pattern + " should not have created a StreamIdentifier");
            } catch (final IllegalArgumentException iae) {
                // expected; ignore
            }
        }
    }

    /**
     * Test ARNs that <b>should not</b> match a valid AWS Kinesis stream ARN.
     */
    @Test
    public void testMultiStreamByArnWithInvalidStreamArnFail() {
        for (final Arn invalidStreamArn : Arrays.asList(
                createArn("abc", SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // invalid partition
                createArn(PARTITION, "dynamodb", KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // incorrect service
                createArn(PARTITION, SERVICE, null, TEST_ACCOUNT_ID, RESOURCE), // missing region
                createArn(PARTITION, SERVICE, KINESIS_REGION, null, RESOURCE), // missing account id
                createArn(PARTITION, SERVICE, KINESIS_REGION, "123456789", RESOURCE), // account id not 12 digits
                createArn(PARTITION, SERVICE, KINESIS_REGION, "123456789abc", RESOURCE), // 12char alphanumeric account id
                createArn(PARTITION, SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, "table/name"), // incorrect resource type
                Arn.fromString("arn:aws:dynamodb:us-east-2:123456789012:table/myDynamoDBTable") // valid ARN for incorrect resource
        )) {
            try {
                StreamIdentifier.multiStreamInstance(invalidStreamArn, EPOCH);
                Assert.fail("Arn " + invalidStreamArn + " should not have created a StreamIdentifier");
            } catch (final IllegalArgumentException iae) {
                // expected; ignore
            }
        }
    }

    @Test(expected = IllegalArgumentException.class)
    public void testNegativeCreationEpoch() {
        StreamIdentifier.multiStreamInstance(DEFAULT_ARN, -123);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testZeroCreationEpoch() {
        StreamIdentifier.multiStreamInstance(DEFAULT_ARN, 0);
    }

    @Test
    public void testSingleStreamInstanceFromArn() {
        final StreamIdentifier actualStreamIdentifier = StreamIdentifier.singleStreamInstance(DEFAULT_ARN);

        assertActualStreamIdentifierExpected(DEFAULT_ARN, actualStreamIdentifier);
        assertEquals(Optional.empty(), actualStreamIdentifier.streamCreationEpochOptional());
        assertEquals(actualStreamIdentifier.streamName(), actualStreamIdentifier.serialize());
    }

    @Test
    public void testMultiStreamInstanceFromArn() {
        final StreamIdentifier actualStreamIdentifier = StreamIdentifier.multiStreamInstance(DEFAULT_ARN, EPOCH);

        assertActualStreamIdentifierExpected(DEFAULT_ARN, actualStreamIdentifier);
        assertEquals(Optional.of(EPOCH), actualStreamIdentifier.streamCreationEpochOptional());
        assertEquals(serialize(), actualStreamIdentifier.serialize());
    }

    @Test
    public void testSingleStreamInstanceWithName() {
        StreamIdentifier actualStreamIdentifier = StreamIdentifier.singleStreamInstance(STREAM_NAME);
        assertEquals(Optional.empty(), actualStreamIdentifier.streamCreationEpochOptional());
        assertEquals(Optional.empty(), actualStreamIdentifier.accountIdOptional());
        assertEquals(Optional.empty(), actualStreamIdentifier.streamArnOptional());
        assertEquals(STREAM_NAME, actualStreamIdentifier.streamName());
    }

    @Test
    public void testMultiStreamInstanceWithIdentifierSerialization() {
        StreamIdentifier actualStreamIdentifier = StreamIdentifier.multiStreamInstance(serialize());
        assertActualStreamIdentifierExpected(null, actualStreamIdentifier);
        assertEquals(Optional.of(EPOCH), actualStreamIdentifier.streamCreationEpochOptional());
    }

    private void assertActualStreamIdentifierExpected(Arn expectedArn, StreamIdentifier actual) {
        assertEquals(STREAM_NAME, actual.streamName());
        assertEquals(Optional.of(TEST_ACCOUNT_ID), actual.accountIdOptional());
        assertEquals(Optional.ofNullable(expectedArn), actual.streamArnOptional());
    }

    /**
     * Creates a pattern that matches {@link StreamIdentifier} serialization.
     */
    private static String serialize() {
        return String.join(":", TEST_ACCOUNT_ID, STREAM_NAME, Long.toString(EPOCH));
    }

    private static Arn createArn() {
        return createArn(PARTITION, SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE);
    }

    private static Arn createArn(String partition, String service, Region region, String account, String resource) {
        return Arn.builder()
                .partition(partition)
                .service(service)
                .region(region != null ? region.id() : null)
                .accountId(account)
                .resource(resource)
                .build();
    }
}
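For reference, the multi-stream serialization format exercised above is accountId:streamName:creationEpoch. A minimal round-trip sketch using the calls shown in these tests (the values are the test fixtures, not real resources):

```java
import software.amazon.kinesis.common.StreamIdentifier;

// Round-trip sketch of the multi-stream serialization tested above.
public class StreamIdentifierSketch {
    public static void main(String[] args) {
        StreamIdentifier id = StreamIdentifier.multiStreamInstance("123456789012:stream-name:1680616058");
        System.out.println(id.streamName());  // stream-name
        System.out.println(id.serialize());   // 123456789012:stream-name:1680616058
    }
}
```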