# Compare commits: master...dependabot

202 commits
| Author | SHA1 | Date |
|---|---|---|
|  | f141a9e1d9 |  |
|  | f2b5c67337 |  |
|  | 8f99d6f733 |  |
|  | 19818ccd61 |  |
|  | 51ef718fc0 |  |
|  | bbc0541883 |  |
|  | acdb964f29 |  |
|  | bcd13e6a73 |  |
|  | 6f55657443 |  |
|  | 053834f484 |  |
|  | 78877d5b83 |  |
|  | 674629162b |  |
|  | 582451ae31 |  |
|  | 6865469645 |  |
|  | 238d3dd10d |  |
|  | e174040213 |  |
|  | 1d9bc10d38 |  |
|  | 80dd624dac |  |
|  | 4cefe1a33b |  |
|  | ae8ad0daba |  |
|  | 48fab1fe71 |  |
|  | 2ab263f273 |  |
|  | 1236d65f78 |  |
|  | e731486b7f |  |
|  | 1e6506cf97 |  |
|  | b011206f7b |  |
|  | db30ac956b |  |
|  | fe1a34f6e5 |  |
|  | e6db972226 |  |
|  | 078a3ca2c4 |  |
|  | 640780abfb |  |
|  | 9ab4206844 |  |
|  | ef251d4a84 |  |
|  | 43edc90c27 |  |
|  | d5324409ee |  |
|  | c72d38bdc1 |  |
|  | 367cede700 |  |
|  | 372f98b21a |  |
|  | 251b331a2e |  |
|  | 8dceb3f80f |  |
|  | 94b138a9d9 |  |
|  | a809b12c43 |  |
|  | 37b5d7b9a1 |  |
|  | 4fd7b7aadb |  |
|  | 1ead07cf9f |  |
|  | 58036d4977 |  |
|  | 07c86081fb |  |
|  | f9648b4d99 |  |
|  | d22fecbb8c |  |
|  | b402ad69b2 |  |
|  | 142653f918 |  |
|  | 269724b8df |  |
|  | d8e29e8cd7 |  |
|  | fb1f44f482 |  |
|  | cf71e4e4e4 |  |
|  | b314d56308 |  |
|  | 1956e02725 |  |
|  | f2b9006a98 |  |
|  | f38dd18ed1 |  |
|  | a13ed13702 |  |
|  | b4f6f8fffb |  |
|  | 8ba93e6aee |  |
|  | 4f6f4e5971 |  |
|  | 539550b664 |  |
|  | 9ed1e10fe5 |  |
|  | 5142ce9bc5 |  |
|  | b04e93ec0d |  |
|  | 530fd477b1 |  |
|  | c935bc4032 |  |
|  | 03e162b770 |  |
|  | e873c999cd |  |
|  | b2a747f3ea |  |
|  | ddfe75bb2d |  |
|  | 76234d172c |  |
|  | 9cb5020022 |  |
|  | 6fbfc21ad7 |  |
|  | ff004d75be |  |
|  | 53174940e8 |  |
|  | 1440eeabbc |  |
|  | aaf4604357 |  |
|  | 142415a213 |  |
|  | 9e353db3db |  |
|  | dd4e88e621 |  |
|  | 2ae76065a8 |  |
|  | 566a2446c6 |  |
|  | 7b13527d41 |  |
|  | 49231cc6c1 |  |
|  | 700daa7a87 |  |
|  | a85ef70898 |  |
|  | 34e9016c01 |  |
|  | 1f71042d80 |  |
|  | 66bcd45d36 |  |
|  | bb84c17abf |  |
|  | 44474a1c05 |  |
|  | 41c08c3b04 |  |
|  | 17ecc87d71 |  |
|  | 226d5113aa |  |
|  | 0d358b682d |  |
|  | b7468267f4 |  |
|  | 32fc14f535 |  |
|  | c5632e38bc |  |
|  | 22737c4a5b |  |
|  | 38cef8963a |  |
|  | 6738087a8f |  |
|  | f713017528 |  |
|  | c3b41c3b55 |  |
|  | 5f7d4b3bc6 |  |
|  | 6f16b168a4 |  |
|  | 8a296a5aa3 |  |
|  | 089c6ab18e |  |
|  | 5bf24bda43 |  |
|  | a1eafbb69a |  |
|  | 995511afe5 |  |
|  | 849ffea028 |  |
|  | 4fd63989d3 |  |
|  | 058403f943 |  |
|  | 36ae2f2428 |  |
|  | 5ba87a34ab |  |
|  | 53cc7fc347 |  |
|  | 0a5724c332 |  |
|  | 0760688375 |  |
|  | 57f8c120d0 |  |
|  | 3a88a60a4e |  |
|  | 6b474b7390 |  |
|  | 4f927da81c |  |
|  | dd72cf96e5 |  |
|  | fdad20aff4 |  |
|  | 45e76b0fd5 |  |
|  | 550d7af5b1 |  |
|  | 43217fedc6 |  |
|  | c7cd2f1e75 |  |
|  | e1111d487e |  |
|  | 418807668d |  |
|  | f93d01f183 |  |
|  | 74fc856dab |  |
|  | 3cd1bdccf7 |  |
|  | e9e64f8511 |  |
|  | e95344b13e |  |
|  | 5760a27962 |  |
|  | de52856b45 |  |
|  | 94a6324724 |  |
|  | 1d50d766f4 |  |
|  | 02c2036d5d |  |
|  | a7079a81e4 |  |
|  | ca15bd4ef5 |  |
|  | 53133fd385 |  |
|  | 5f50f08dcc |  |
|  | e8180c9804 |  |
|  | 40bc62e1f9 |  |
|  | bd59461c2c |  |
|  | 5b3078f801 |  |
|  | 2d6b92e8ac |  |
|  | f369f2114a |  |
|  | 635a101ab4 |  |
|  | a3e720e8e2 |  |
|  | 7663673f3b |  |
|  | bddf573c03 |  |
|  | ebaa5ef788 |  |
|  | 3947b55853 |  |
|  | 5cb2a3500c |  |
|  | d9a5789bb2 |  |
|  | cce5b5a620 |  |
|  | 1bdd1d1671 |  |
|  | 34f5e33450 |  |
|  | ffb34b6d96 |  |
|  | cc8aa5ef74 |  |
|  | 3e36f0c7d0 |  |
|  | b537403d81 |  |
|  | ba984fe279 |  |
|  | 8f58bafacc |  |
|  | 0e910a1dbc |  |
|  | 41f996b833 |  |
|  | 2ca3cbd21c |  |
|  | c00203f028 |  |
|  | a55e1b0e8f |  |
|  | 4ec77b1705 |  |
|  | 317dc1ae84 |  |
|  | f1a50cd14b |  |
|  | 7a1d3031c5 |  |
|  | d4b72f9f49 |  |
|  | aa3ab52cdf |  |
|  | 5e4888f431 |  |
|  | 36673e0877 |  |
|  | 3aefdfa28a |  |
|  | 73dc2aaa76 |  |
|  | 6c066a8e6d |  |
|  | bb81bfaa66 |  |
|  | fbdd449759 |  |
|  | 54e6a48a48 |  |
|  | 7a4ecd1706 |  |
|  | cda52b60c1 |  |
|  | e1227954f5 |  |
|  | 1ebc0d5cc7 |  |
|  | 1d5cb599fd |  |
|  | d0ce534734 |  |
|  | 6d80ca1723 |  |
|  | 419c2731bb |  |
|  | 8fb01ddd08 |  |
|  | c7105765c6 |  |
|  | f26a555f13 |  |
|  | b5d8b1057d |  |
|  | cba8bf6bfa |  |
647 changed files with 35241 additions and 76333 deletions
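The file-level summary above is the kind of output `git diff --shortstat` produces locally; a minimal sketch, assuming the head ref is fetched under the abbreviated name shown in the compare header:

```sh
# Rough local equivalent of the "changed files / additions / deletions" summary.
# "dependabot" is the abbreviated head ref from the page header (an assumption).
git fetch origin
git diff --shortstat origin/master...origin/dependabot
```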
```diff
@@ -1 +0,0 @@
-63ff312818a5f70eab9ec5bf80b53bdd7bf80248
```
### .github/dependabot.yml (deleted, 31 lines)

```diff
@@ -1,31 +0,0 @@
-version: 2
-updates:
-  # branch - master
-  - package-ecosystem: "maven"
-    directory: "/"
-    labels:
-      - "dependencies"
-      - "v3.x"
-    target-branch: "master"
-    schedule:
-      interval: "weekly"
-
-  # branch - v2.x
-  - package-ecosystem: "maven"
-    directory: "/"
-    labels:
-      - "dependencies"
-      - "v2.x"
-    target-branch: "v2.x"
-    schedule:
-      interval: "weekly"
-
-  # branch - v1.x
-  - package-ecosystem: "maven"
-    directory: "/"
-    labels:
-      - "dependencies"
-      - "v1.x"
-    target-branch: "v1.x"
-    schedule:
-      interval: "weekly"
```
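The removed configuration above declares one Maven update rule per release branch. A quick, illustrative way to list which ecosystems and branches such a file targets, using nothing beyond grep:

```sh
# Show the ecosystem and target branch of every update rule in the config.
grep -nE 'package-ecosystem|target-branch' .github/dependabot.yml
```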
### .github/scripts/backwards_compatibility_check.sh (deleted, 144 lines)

```diff
@@ -1,144 +0,0 @@
-#!/bin/bash
-
-TRUE=1
-FALSE=0
-KCL_MAVEN_DIR=~/.m2/repository/software/amazon/kinesis/amazon-kinesis-client
-
-REMOVED_METHODS_FLAG=$FALSE
-LATEST_VERSION=""
-LATEST_JAR=""
-CURRENT_VERSION=""
-CURRENT_JAR=""
-
-# Get the JAR from the latest version release on Maven.
-get_latest_jar() {
-  # clear the directory so that the latest release will be the only version in the Maven directory after running mvn dependency:get
-  rm -rf "$KCL_MAVEN_DIR"
-  mvn -B dependency:get -Dartifact=software.amazon.kinesis:amazon-kinesis-client:LATEST
-  LATEST_VERSION=$(ls "$KCL_MAVEN_DIR" | grep -E '[0-9]+.[0-9]+.[0-9]+')
-  LATEST_JAR=$KCL_MAVEN_DIR/$LATEST_VERSION/amazon-kinesis-client-$LATEST_VERSION.jar
-}
-
-# Get the JAR with the changes that need to be verified.
-get_current_jar() {
-  mvn -B install -Dmaven.test.skip=true
-  CURRENT_VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
-  CURRENT_JAR=$KCL_MAVEN_DIR/$CURRENT_VERSION/amazon-kinesis-client-$CURRENT_VERSION.jar
-}
-
-is_new_minor_release() {
-  is_new_major_release && return 1
-
-  local latest_minor_version=$(echo "$LATEST_VERSION" | cut -d . -f 2)
-  local current_minor_version=$(echo "$CURRENT_VERSION" | cut -d . -f 2)
-  [[ "$latest_minor_version" != "$current_minor_version" ]]
-  return $?
-}
-
-is_new_major_release() {
-  local latest_major_version=$(echo "$LATEST_VERSION" | cut -d . -f 1)
-  local current_major_version=$(echo "$CURRENT_VERSION" | cut -d . -f 1)
-  [[ "$latest_major_version" != "$current_major_version" ]]
-  return $?
-}
-
-# Skip classes with the KinesisClientInternalApi annotation. These classes are subject to breaking backwards compatibility.
-is_kinesis_client_internal_api() {
-  local current_class="$1"
-  local grep_internal_api_result=$(javap -v -classpath "$LATEST_JAR" "$current_class" | grep KinesisClientInternalApi)
-  [[ "$grep_internal_api_result" != "" ]]
-  return $?
-}
-
-# Skip classes which are not public (e.g. package level). These classes will not break backwards compatibility.
-is_non_public_class() {
-  local current_class="$1"
-  local class_definition=$(javap -classpath "$LATEST_JAR" "$current_class" | head -2 | tail -1)
-  [[ "$class_definition" != *"public"* ]]
-  return $?
-}
-
-# Ignore methods that change from abstract to non-abstract (and vice-versa) if the class is an interface.\
-# Ignore methods that change from synchronized to non-synchronized (and vice-versa)
-ignore_non_breaking_changes() {
-  local current_class="$1"
-  local class_definition=$(javap -classpath "$LATEST_JAR" "$current_class" | head -2 | tail -1)
-  if [[ $class_definition == *"interface"* ]]
-  then
-    LATEST_METHODS=${LATEST_METHODS//abstract /}
-    CURRENT_METHODS=${CURRENT_METHODS//abstract /}
-  else
-    LATEST_METHODS=${LATEST_METHODS//synchronized /}
-    CURRENT_METHODS=${CURRENT_METHODS//synchronized /}
-  fi
-}
-
-# Checks if there are any methods in the latest version that were removed in the current version.
-find_removed_methods() {
-  echo "Checking if methods in current version (v$CURRENT_VERSION) were removed from latest version (v$LATEST_VERSION)"
-  if is_new_minor_release || is_new_major_release
-  then
-    echo "New minor/major release is being performed. Ignoring changes in classes marked with @KinesisClientInternalApi annotation."
-  fi
-  local latest_classes=$(
-    jar tf $LATEST_JAR |
-    grep .class |
-    tr / . |
-    sed 's/\.class$//' |
-    # skip generated proto classes since these have a lot of inherited methods
-    # that are not outputted by javap. besides, generated java code is not a
-    # good indicator of proto compatibility- it will not capture reserved
-    # tags or deprecated fields.
-    grep -v 'software\.amazon\.kinesis\.retrieval\.kpl\.Messages')
-  for class in $latest_classes
-  do
-    if is_kinesis_client_internal_api "$class" || is_non_public_class "$class"
-    then
-      continue
-    fi
-
-    CURRENT_METHODS=$(javap -classpath "$CURRENT_JAR" "$class" 2>/dev/null)
-    if [ -z "$CURRENT_METHODS" ]
-    then
-      echo "Class $class was removed"
-      REMOVED_METHODS_FLAG=$TRUE
-      continue
-    fi
-
-    LATEST_METHODS=$(javap -classpath "$LATEST_JAR" "$class")
-
-    ignore_non_breaking_changes "$class"
-
-    local removed_methods=$(diff <(echo "$LATEST_METHODS") <(echo "$CURRENT_METHODS") | grep '^<')
-
-    # ignore synthetic access methods - these are not available to users and will not break backwards compatibility
-    removed_methods=$(echo "$removed_methods" | grep -v "access\$[0-9]\+")
-
-    if [[ "$removed_methods" != "" ]]
-    then
-      REMOVED_METHODS_FLAG=$TRUE
-      echo "$class does not have method(s):"
-      echo "$removed_methods"
-    fi
-  done
-}
-
-get_backwards_compatible_result() {
-  if [[ $REMOVED_METHODS_FLAG == $TRUE ]]
-  then
-    echo "Current KCL version $CURRENT_VERSION is not backwards compatible with version $LATEST_VERSION. See output above for removed packages/methods."
-    is_new_major_release || exit 1
-  else
-    echo "Current KCL version $CURRENT_VERSION is backwards compatible with version $LATEST_VERSION."
-    exit 0
-  fi
-}
-
-main() {
-  get_latest_jar
-  get_current_jar
-  find_removed_methods
-  get_backwards_compatible_result
-}
-
-main
```
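The heart of the removed script is a `javap`-based signature comparison. A standalone sketch of the same technique for a single class follows; the JAR paths are placeholders, and the class name is only an example of a public KCL class:

```sh
#!/bin/bash
# Compare the visible member signatures of one class between two JARs.
# Lines prefixed with '<' exist only in the old JAR, i.e. they were removed.
OLD_JAR="path/to/amazon-kinesis-client-OLD.jar"   # placeholder
NEW_JAR="path/to/amazon-kinesis-client-NEW.jar"   # placeholder
CLASS="software.amazon.kinesis.coordinator.Scheduler"

diff <(javap -classpath "$OLD_JAR" "$CLASS") \
     <(javap -classpath "$NEW_JAR" "$CLASS") | grep '^<'
```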
### .github/workflows/maven.yml (47 changed lines)

```diff
@@ -7,59 +7,26 @@
 # documentation.
 
 name: Java CI with Maven
 
 on:
   push:
     branches:
-      - "master"
-      - "v2.x"
-      - "v1.x"
+      - 'v1.x'
   pull_request:
     branches:
-      - "master"
-      - "v2.x"
-      - "v1.x"
+      - 'v1.x'
 
-permissions:
-  contents: write
-  pull-requests: write
-
 jobs:
   build:
 
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
       - name: Set up JDK 8
-        uses: actions/setup-java@v4
+        uses: actions/setup-java@v3
         with:
           java-version: '8'
           distribution: 'corretto'
       - name: Build with Maven
         run: mvn -B package --file pom.xml -DskipITs
-  backwards-compatible-check:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set up JDK 8
-        uses: actions/setup-java@v4
-        with:
-          java-version: '8'
-          distribution: 'corretto'
-      - name: Check backwards compatibility of changes
-        run: .github/scripts/backwards_compatibility_check.sh
-  auto-merge:
-    needs: [build]
-    runs-on: ubuntu-latest
-    if: github.event.pull_request.user.login == 'dependabot[bot]'
-    steps:
-      - name: Dependabot metadata
-        id: metadata
-        uses: dependabot/fetch-metadata@v2
-        with:
-          alert-lookup: true
-          github-token: "${{ secrets.GITHUB_TOKEN }}"
-      - name: Enable auto-merge for Dependabot PRs
-        if: steps.metadata.outputs.update-type == 'version-update:semver-patch' && steps.metadata.outputs.cvss > 0
-        run: gh pr merge --auto --merge "$PR_URL"
-        env:
-          PR_URL: ${{github.event.pull_request.html_url}}
-          GH_TOKEN: ${{secrets.GITHUB_TOKEN}}
```
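The removed `auto-merge` job drives GitHub's auto-merge through the `gh` CLI; the same command it runs can be issued by hand for a single pull request (the URL below is a placeholder, and `gh` must be authenticated for the repository):

```sh
# Queue a PR for auto-merge with the merge-commit strategy once checks pass.
gh pr merge --auto --merge "<pull-request-url>"
```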
### .gitignore (3 lines removed)

```diff
@@ -1,6 +1,3 @@
 target/
 AwsCredentials.properties
 .idea
-*.iml
-*.swp
-.DS_Store
```
571
CHANGELOG.md
571
CHANGELOG.md
|
|
@ -1,64 +1,523 @@
|
||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
For **1.x** release notes, please see [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md)
|
### Announcement on March 13, 2025
|
||||||
|
Amazon Kinesis Client Library (KCL) 1.x will reach end-of-support on January 30, 2026. Accordingly, these versions will enter maintenance mode on April 17, 2025. During maintenance mode, AWS will provide updates only for critical bug fixes and security issues. Major versions in maintenance mode will not receive updates for new features or feature enhancements. If you’re using KCL 1.x, we recommend migrating to the latest versions. When migrating from KCL 1.x to 3.x, you will need to update interfaces and security credential providers in your application. For details about the end-of-support notice and required actions, see the following links:
|
||||||
|
|
||||||
For **2.x** release notes, please see [v2.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v2.x/CHANGELOG.md)
|
* [AWS Blog: Announcing end-of-support for Amazon Kinesis Client Library 1.x and Amazon Kinesis Producer Library 0.x effective January 30, 2026](https://aws.amazon.com/blogs/big-data/announcing-end-of-support-for-amazon-kinesis-client-library-1-x-and-amazon-kinesis-producer-library-0-x-effective-january-30-2026/)
|
||||||
|
* [Kinesis documentation: KCL version lifecycle policy](https://docs.aws.amazon.com/streams/latest/dev/kcl-version-lifecycle-policy.html)
|
||||||
|
* [Kinesis documentation: Migrating from KCL 1.x to KCL 3.x](https://docs.aws.amazon.com/streams/latest/dev/kcl-migration-1-3.html)
|
||||||
|
|
||||||
---
|
### Latest Release (1.15.2 - Aug 14, 2024)
|
||||||
### Release 3.0.3 (May 7, 2025)
|
* [#1371](https://github.com/awslabs/amazon-kinesis-client/pull/1371) Fix a bug in debug and trace logging levels for worker
|
||||||
* [#1464](https://github.com/awslabs/amazon-kinesis-client/pull/1464) Add config for LeaseAssignmentManager frequency and improve assignment time of newly created leases
|
* [#1224](https://github.com/awslabs/amazon-kinesis-client/pull/1224) Modify RecordProcessorCheckpointer#advancePosition Metrics usage to ensure proper closure
|
||||||
* [#1463](https://github.com/awslabs/amazon-kinesis-client/pull/1463) Extend ShardConsumer constructor to have ConsumerTaskFactory as a parameter to support [DynamoDB Streams Kinesis Adapter](https://github.com/awslabs/dynamodb-streams-kinesis-adapter) compatibility
|
* [#1345](https://github.com/awslabs/amazon-kinesis-client/pull/1345) Generate wrappers from proto files instead of shipping them directly
|
||||||
* [#1472](https://github.com/awslabs/amazon-kinesis-client/pull/1472) Remove unused synchronized keyword
|
* [#1346](https://github.com/awslabs/amazon-kinesis-client/pull/1346) Upgrade com.google.protobuf:protobuf-java from 3.23.4 to 4.27.1
|
||||||
|
* [#1338](https://github.com/awslabs/amazon-kinesis-client/pull/1338) Upgrade org.apache.logging.log4j:log4j-api from 2.20.0 to 2.23.1
|
||||||
|
* [#1327](https://github.com/awslabs/amazon-kinesis-client/pull/1327) Upgrade com.google.guava:guava from 33.0.0-jre to 33.2.0-jre
|
||||||
|
* [#1283](https://github.com/awslabs/amazon-kinesis-client/pull/1283) Upgrade com.fasterxml.jackson.core:jackson-core from 2.15.2 to 2.17.0
|
||||||
|
* [#1284](https://github.com/awslabs/amazon-kinesis-client/pull/1284) Upgrade aws-java-sdk.version from 1.12.647 to 1.12.681
|
||||||
|
* [#1288](https://github.com/awslabs/amazon-kinesis-client/pull/1288) Upgrade commons-logging:commons-logging from 1.2 to 1.3.1
|
||||||
|
* [#1289](https://github.com/awslabs/amazon-kinesis-client/pull/1289) Upgrade org.projectlombok:lombok from 1.18.22 to 1.18.32
|
||||||
|
* [#1248](https://github.com/awslabs/amazon-kinesis-client/pull/1248) Upgrade org.apache.maven.plugins:maven-surefire-plugin from 2.22.2 to 3.2.5
|
||||||
|
* [#1234](https://github.com/awslabs/amazon-kinesis-client/pull/1234) Upgrade org.apache.maven.plugins:maven-javadoc-plugin from 3.4.1 to 3.6.3
|
||||||
|
* [#1137](https://github.com/awslabs/amazon-kinesis-client/pull/1137) Upgrade maven-failsafe-plugin from 2.22.2 to 3.1.2
|
||||||
|
* [#1134](https://github.com/awslabs/amazon-kinesis-client/pull/1134) Upgrade jackson-core from 2.15.0 to 2.15.2
|
||||||
|
* [#1119](https://github.com/awslabs/amazon-kinesis-client/pull/1119) Upgrade maven-source-plugin from 3.2.1 to 3.3.0
|
||||||
|
* [#1165](https://github.com/awslabs/amazon-kinesis-client/pull/1165) Upgrade protobuf-java from 3.19.6 to 3.23.4
|
||||||
|
|
||||||
### Release 3.0.2 (March 12, 2025)
|
### Release (1.15.1 - Feb 5, 2023)
|
||||||
* [#1443](https://github.com/awslabs/amazon-kinesis-client/pull/1443) Reduce DynamoDB IOPS for smaller stream and worker count applications
|
* [#1214](https://github.com/awslabs/amazon-kinesis-client/pull/1214) Added backoff logic for ShardSyncTaskIntegrationTest
|
||||||
* The below two PRs are intended to support [DynamoDB Streams Kinesis Adapter](https://github.com/awslabs/dynamodb-streams-kinesis-adapter) compatibility
|
* [#1214](https://github.com/awslabs/amazon-kinesis-client/pull/1214) Upgrade Guava version from 31.0.1 to 32.1.1
|
||||||
* [#1441](https://github.com/awslabs/amazon-kinesis-client/pull/1441) Make consumerTaskFactory overridable by customers
|
* [#1252](https://github.com/awslabs/amazon-kinesis-client/pull/1252) Upgrade aws-java-sdk from 1.12.406 to 1.12.647
|
||||||
* [#1440](https://github.com/awslabs/amazon-kinesis-client/pull/1440) Make ShutdownTask, ProcessTask, InitializeTask, BlockOnParentTask, and ShutdownNotificationTask overridable by customers
|
|
||||||
* [#1437](https://github.com/awslabs/amazon-kinesis-client/pull/1437) Suppress LeaseAssignmentManager excessive WARN logs when there is no issue
|
|
||||||
* [#1439](https://github.com/awslabs/amazon-kinesis-client/pull/1439) Upgrade io.netty:netty-handler from 4.1.108.Final to 4.1.118.Final
|
|
||||||
* [#1400](https://github.com/awslabs/amazon-kinesis-client/pull/1400) Upgrade com.fasterxml.jackson.core:jackson-databind from 2.10.1 to 2.12.7.1
|
|
||||||
|
|
||||||
### Release 3.0.1 (November 14, 2024)
|
### Release (1.15.0 - Jun 8, 2023)
|
||||||
* [#1401](https://github.com/awslabs/amazon-kinesis-client/pull/1401) Fixed the lease graceful handoff behavior in the multi-stream processing mode
|
* **[#1108](https://github.com/awslabs/amazon-kinesis-client/pull/1108) Add support for Stream ARNs**
|
||||||
* [#1398](https://github.com/awslabs/amazon-kinesis-client/pull/1398) Addressed several KCL 3.0 related issues raised via GitHub
|
* [#1111](https://github.com/awslabs/amazon-kinesis-client/pull/1111) More consistent testing behavior with HashRangesAreAlwaysComplete
|
||||||
* Fixed transitive dependencies and added a Maven plugin to catch potential transitive dependency issues at build time
|
* [#1054](https://github.com/awslabs/amazon-kinesis-client/pull/1054) Upgrade log4j-core from 2.17.1 to 2.20.0
|
||||||
* Removed the redundant shutdown of the leaseCoordinatorThreadPool
|
* [#1103](https://github.com/awslabs/amazon-kinesis-client/pull/1103) Upgrade jackson-core from 2.13.0 to 2.15.0
|
||||||
* Fixed typo THROUGHOUT_PUT_KBPS
|
* [#943](https://github.com/awslabs/amazon-kinesis-client/pull/943) Upgrade nexus-staging-maven-plugin from 1.6.8 to 1.6.13
|
||||||
* Fixed issues in scheduler shutdown sequence
|
* [#1044](https://github.com/awslabs/amazon-kinesis-client/pull/1044) Upgrade aws-java-sdk.version from 1.12.406 to 1.12.408
|
||||||
|
* [#1055](https://github.com/awslabs/amazon-kinesis-client/pull/1055) Upgrade maven-compiler-plugin from 3.10.0 to 3.11.0
|
||||||
|
|
||||||
* Note: If you are using [multi-stream processing with KCL](https://docs.aws.amazon.com/streams/latest/dev/kcl-multi-stream.html), you need to use the release 3.0.1 or later.
|
### Release (1.14.10 - Feb 15, 2023)
|
||||||
|
* Updated aws-java-sdk from 1.12.130 to 1.12.406
|
||||||
|
* Updated com.google.protobuf from 3.19.4 to 3.19.6
|
||||||
|
* [Issue #1026](https://github.com/awslabs/amazon-kinesis-client/issues/1026)
|
||||||
|
* [PR #1042](https://github.com/awslabs/amazon-kinesis-client/pull/1042)
|
||||||
|
|
||||||
### Release 3.0.0 (November 06, 2024)
|
### Release (1.14.9 - Dec 14, 2022)
|
||||||
* New lease assignment / load balancing algorithm
|
* [#995](https://github.com/awslabs/amazon-kinesis-client/commit/372f98b21a91487e36612d528c56765a44b0aa86) Every other change for DynamoDBStreamsKinesis Adapter Compatibility
|
||||||
* KCL 3.x introduces a new lease assignment and load balancing algorithm. It assigns leases among workers based on worker utilization metrics and throughput on each lease, replacing the previous lease count-based lease assignment algorithm.
|
* [#970](https://github.com/awslabs/amazon-kinesis-client/commit/251b331a2e0fd912b50f8b5a12d088bf0b3263b9) PeriodicShardSyncManager Changes Needed for DynamoDBStreamsKinesisAdapter
|
||||||
* When KCL detects higher variance in CPU utilization among workers, it proactively reassigns leases from over-utilized workers to under-utilized workers for even load balancing. This ensures even CPU utilization across workers and removes the need to over-provision the stream processing compute hosts.
|
|
||||||
* Optimized DynamoDB RCU usage
|
### Release (1.14.8 - Feb 24, 2022)
|
||||||
* KCL 3.x optimizes DynamoDB read capacity unit (RCU) usage on the lease table by implementing a global secondary index with leaseOwner as the partition key. This index mirrors the leaseKey attribute from the base lease table, allowing workers to efficiently discover their assigned leases by querying the index instead of scanning the entire table.
|
* [Bump log4j-core from 2.17.0 to 2.17.1](https://github.com/awslabs/amazon-kinesis-client/commit/94b138a9d9a502ee0f4f000bb0efd2766ebadc37)
|
||||||
* This approach significantly reduces read operations compared to earlier KCL versions, where workers performed full table scans, resulting in higher RCU consumption.
|
* [Bump protobuf-java from 3.19.1 to 3.19.4](https://github.com/awslabs/amazon-kinesis-client/commit/a809b12c43c57a3d6ad3827feb60e4322614259c)
|
||||||
* Graceful lease handoff
|
* [Bump maven-compiler-plugin from 3.8.1 to 3.10.0](https://github.com/awslabs/amazon-kinesis-client/commit/37b5d7b9a1ccad483469ef542a6a7237462b14f2)
|
||||||
* KCL 3.x introduces a feature called "graceful lease handoff" to minimize data reprocessing during lease reassignments. Graceful lease handoff allows the current worker to complete checkpointing of processed records before transferring the lease to another worker. For graceful lease handoff, you should implement checkpointing logic within the existing `shutdownRequested()` method.
|
|
||||||
* This feature is enabled by default in KCL 3.x, but you can turn off this feature by adjusting the configuration property `isGracefulLeaseHandoffEnabled`.
|
### Release (1.14.7 - Dec 22, 2021)
|
||||||
* While this approach significantly reduces the probability of data reprocessing during lease transfers, it doesn't completely eliminate the possibility. To maintain data integrity and consistency, it's crucial to design your downstream consumer applications to be idempotent. This ensures that the application can handle potential duplicate record processing without adverse effects.
|
* [#881](https://github.com/awslabs/amazon-kinesis-client/pull/881) Update log4j test dependency from 2.16.0 to 2.17.0 and some other dependencies
|
||||||
* New DynamoDB metadata management artifacts
|
|
||||||
* KCL 3.x introduces two new DynamoDB tables for improved lease management:
|
### Release (1.14.6 - Dec 15, 2021)
|
||||||
* Worker metrics table: Records CPU utilization metrics from each worker. KCL uses these metrics for optimal lease assignments, balancing resource utilization across workers. If CPU utilization metric is not available, KCL assigns leases to balance the total sum of shard throughput per worker instead.
|
* [#876](https://github.com/awslabs/amazon-kinesis-client/pull/876) Update log4j test dependency from 2.15.0 to 2.16.0
|
||||||
* Coordinator state table: Stores internal state information for workers. Used to coordinate in-place migration from KCL 2.x to KCL 3.x and leader election among workers.
|
|
||||||
* Follow this [documentation](https://docs.aws.amazon.com/streams/latest/dev/kcl-migration-from-2-3.html#kcl-migration-from-2-3-IAM-permissions) to add required IAM permissions for your KCL application.
|
### Release (1.14.5 - Dec 10, 2021)
|
||||||
* Other improvements and changes
|
* [#872](https://github.com/awslabs/amazon-kinesis-client/pull/872) Update log4j test dependency from 1.2.17 to 2.15.0
|
||||||
* Dependency on the AWS SDK for Java 1.x has been fully removed.
|
* [#873](https://github.com/awslabs/amazon-kinesis-client/pull/873) Upgrading version of AWS Java SDK to 1.12.128
|
||||||
* The Glue Schema Registry integration functionality no longer depends on AWS SDK for Java 1.x. Previously, it required this as a transient dependency.
|
|
||||||
* Multilangdaemon has been upgraded to use AWS SDK for Java 2.x. It no longer depends on AWS SDK for Java 1.x.
|
### Release (1.14.4 - June 14, 2021)
|
||||||
* `idleTimeBetweenReadsInMillis` (PollingConfig) now has a minimum default value of 200.
|
* [Milestone#61](https://github.com/awslabs/amazon-kinesis-client/milestone/61)
|
||||||
* This polling configuration property determines the [publishers](https://github.com/awslabs/amazon-kinesis-client/blob/master/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java) wait time between GetRecords calls in both success and failure cases. Previously, setting this value below 200 caused unnecessary throttling. This is because Amazon Kinesis Data Streams supports up to five read transactions per second per shard for shared-throughput consumers.
|
* [#816](https://github.com/awslabs/amazon-kinesis-client/pull/816) Updated the Worker shutdown logic to make sure that the `LeaseCleanupManager` also terminates all the threads that it has started.
|
||||||
* Shard lifecycle management is improved to deal with edge cases around shard splits and merges to ensure records continue being processed as expected.
|
* [#821](https://github.com/awslabs/amazon-kinesis-client/pull/821) Upgrading version of AWS Java SDK to 1.12.3
|
||||||
* Migration
|
|
||||||
* The programming interfaces of KCL 3.x remain identical with KCL 2.x for an easier migration, with the exception of those applications that do not use the recommended approach of using the Config Builder. These applications will have to refer to [the troubleshooting guide](https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html#compiliation-error-leasemanagementconfig). For detailed migration instructions, please refer to the [Migrate consumers from KCL 2.x to KCL 3.x](https://docs.aws.amazon.com/streams/latest/dev/kcl-migration-from-2-3.html) page in the Amazon Kinesis Data Streams developer guide.
|
### Release (1.14.3 - May 3, 2021)
|
||||||
* Configuration properties
|
* [Milestone#60](https://github.com/awslabs/amazon-kinesis-client/milestone/60)
|
||||||
* New configuration properties introduced in KCL 3.x are listed in this [doc](https://github.com/awslabs/amazon-kinesis-client/blob/master/docs/kcl-configurations.md#new-configurations-in-kcl-3x).
|
* [#811](https://github.com/awslabs/amazon-kinesis-client/pull/811) Fixing a bug in `KinesisProxy` that can lead to undetermined behavior during partial failures.
|
||||||
* Deprecated configuration properties in KCL 3.x are listed in this [doc](https://github.com/awslabs/amazon-kinesis-client/blob/master/docs/kcl-configurations.md#discontinued-configuration-properties-in-kcl-3x). You need to keep the deprecated configuration properties during the migration from any previous KCL version to KCL 3.x.
|
* [#811](https://github.com/awslabs/amazon-kinesis-client/pull/811) Adding guardrails to handle duplicate shards from the service.
|
||||||
* Metrics
|
|
||||||
* New CloudWatch metrics introduced in KCL 3.x are explained in the [Monitor the Kinesis Client Library with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-kcl.html) in the Amazon Kinesis Data Streams developer guide. The following operations are newly added in KCL 3.x:
|
## Release (1.14.2 - February 24, 2021)
|
||||||
* `LeaseAssignmentManager`
|
* [Milestone#57](https://github.com/awslabs/amazon-kinesis-client/milestone/57)
|
||||||
* `WorkerMetricStatsReporter`
|
* [#790](https://github.com/awslabs/amazon-kinesis-client/pull/790) Fixing a bug that caused paginated `ListShards` calls with the `ShardFilter` parameter to fail when the lease table was being initialized.
|
||||||
* `LeaseDiscovery`
|
|
||||||
|
## Release (1.14.1 - January 27, 2021)
|
||||||
|
* [Milestone#56](https://github.com/awslabs/amazon-kinesis-client/milestone/56)
|
||||||
|
|
||||||
|
* Fix for cross DDB table interference when multiple KCL applications are run in same JVM.
|
||||||
|
* Fix and guards to avoid potential checkpoint rewind during shard end, which may block children shard processing.
|
||||||
|
* Fix for thread cycle wastage on InitializeTask for deleted shard.
|
||||||
|
* Improved logging in LeaseCleanupManager that would indicate why certain shards are not cleaned up from the lease table.
|
||||||
|
|
||||||
|
|
||||||
|
## Release (1.14.0 - August 17, 2020)
|
||||||
|
|
||||||
|
* [Milestone#50](https://github.com/awslabs/amazon-kinesis-client/milestone/50)
|
||||||
|
|
||||||
|
* Behavior of shard synchronization is moving from each worker independently learning about all existing shards to workers only discovering the children of shards that each worker owns. This optimizes memory usage, lease table IOPS usage, and number of calls made to kinesis for streams with high shard counts and/or frequent resharding.
|
||||||
|
* When bootstrapping an empty lease table, KCL utilizes the ListShard API's filtering option (the ShardFilter optional request parameter) to retrieve and create leases only for a snapshot of shards open at the time specified by the ShardFilter parameter. The ShardFilter parameter enables you to filter out the response of the ListShards API, using the Type parameter. KCL uses the Type filter parameter and the following of its valid values to identify and return a snapshot of open shards that might require new leases.
|
||||||
|
* Currently, the following shard filters are supported:
|
||||||
|
* `AT_TRIM_HORIZON` - the response includes all the shards that were open at `TRIM_HORIZON`.
|
||||||
|
* `AT_LATEST` - the response includes only the currently open shards of the data stream.
|
||||||
|
* `AT_TIMESTAMP` - the response includes all shards whose start timestamp is less than or equal to the given timestamp and end timestamp is greater than or equal to the given timestamp or still open.
|
||||||
|
* `ShardFilter` is used when creating leases for an empty lease table to initialize leases for a snapshot of shards specified at `KinesisClientLibConfiguration#initialPositionInStreamExtended`.
|
||||||
|
* For more information about ShardFilter, see the [official AWS documentation on ShardFilter](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ShardFilter.html).
|
||||||
|
|
||||||
|
* Introducing support for the `ChildShards` response of the `GetRecords` API to perform lease/shard synchronization that happens at `SHARD_END` for closed shards, allowing a KCL worker to only create leases for the child shards of the shard it finished processing.
|
||||||
|
* For KCL 1.x applications, this uses the `ChildShards` response of the `GetRecords` API.
|
||||||
|
* For more information, see the official AWS Documentation on [GetRecords](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) and [ChildShard](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ChildShard.html).
|
||||||
|
|
||||||
|
* KCL now also performs additional periodic shard/lease scans in order to identify any potential holes in the lease table to ensure the complete hash range of the stream is being processed and create leases for them if required. When `KinesisClientLibConfiguration#shardSyncStrategyType` is set to `ShardSyncStrategyType.SHARD_END`, `PeriodicShardSyncManager#leasesRecoveryAuditorInconsistencyConfidenceThreshold` will be used to determine the threshold for number of consecutive scans containing holes in the lease table after which to enforce a shard sync. When `KinesisClientLibConfiguration#shardSyncStrategyType` is set to `ShardSyncStrategyType.PERIODIC`, `leasesRecoveryAuditorInconsistencyConfidenceThreshold` is ignored.
|
||||||
|
* New configuration options are available to configure `PeriodicShardSyncManager` in `KinesisClientLibConfiguration`
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ----------------------------------------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| leasesRecoveryAuditorInconsistencyConfidenceThreshold | 3 | Confidence threshold for the periodic auditor job to determine if leases for a stream in the lease table is inconsistent. If the auditor finds same set of inconsistencies consecutively for a stream for this many times, then it would trigger a shard sync. Only used for `ShardSyncStrategyType.SHARD_END`. |
|
||||||
|
|
||||||
|
* New CloudWatch metrics are also now emitted to monitor the health of `PeriodicShardSyncManager`:
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --------------------------- | ------------------------------------------------------ |
|
||||||
|
| NumStreamsWithPartialLeases | Number of streams that had holes in their hash ranges. |
|
||||||
|
| NumStreamsToSync | Number of streams which underwent a full shard sync. |
|
||||||
|
|
||||||
|
* Introducing deferred lease cleanup. Leases will be deleted asynchronously by `LeaseCleanupManager` upon reaching `SHARD_END`, when a shard has either expired past the stream’s retention period or been closed as the result of a resharding operation.
|
||||||
|
* New configuration options are available to configure `LeaseCleanupManager`.
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ----------------------------------- | ---------- | --------------------------------------------------------------------------------------------------------- |
|
||||||
|
| leaseCleanupIntervalMillis | 1 minute | Interval at which to run lease cleanup thread. |
|
||||||
|
| completedLeaseCleanupIntervalMillis | 5 minutes | Interval at which to check if a lease is completed or not. |
|
||||||
|
| garbageLeaseCleanupIntervalMillis | 30 minutes | Interval at which to check if a lease is garbage (i.e trimmed past the stream's retention period) or not. |
|
||||||
|
|
||||||
|
* Including an optimization to `KinesisShardSyncer` to only create leases for one layer of shards.
|
||||||
|
* Changing default shard prioritization strategy to be `NoOpShardPrioritization` to allow prioritization of completed shards. Customers who are upgrading to this version and are reading from `TRIM_HORIZON` should continue using `ParentsFirstShardPrioritization` while upgrading.
|
||||||
|
* Upgrading version of AWS SDK to 1.11.844.
|
||||||
|
* [#719](https://github.com/awslabs/amazon-kinesis-client/pull/719) Upgrading version of Google Protobuf to 3.11.4.
|
||||||
|
* [#712](https://github.com/awslabs/amazon-kinesis-client/pull/712) Allowing KCL to consider lease tables in `UPDATING` healthy.
|
||||||
|
|
||||||
|
## Release 1.13.3 (1.13.3 March 2, 2020)
|
||||||
|
[Milestone#49] (https://github.com/awslabs/amazon-kinesis-client/milestone/49)
|
||||||
|
* Refactoring shard closure verification performed by ShutdownTask.
|
||||||
|
* [PR #684] (https://github.com/awslabs/amazon-kinesis-client/pull/684)
|
||||||
|
* Fixing the bug in ShardSyncTaskManager to resolve the issue of new shards not being processed after resharding.
|
||||||
|
* [PR #694] (https://github.com/awslabs/amazon-kinesis-client/pull/694)
|
||||||
|
|
||||||
|
## Release 1.13.2 (Janurary 13, 2020)
|
||||||
|
* Adding backward compatible constructors that use the default DDB Billing Mode (#673)
|
||||||
|
* [PR #673](https://github.com/awslabs/amazon-kinesis-client/pull/673)
|
||||||
|
|
||||||
|
## Release 1.13.1 (December 31, 2019)
|
||||||
|
[Milestone#44](https://github.com/awslabs/amazon-kinesis-client/milestone/44)
|
||||||
|
* Adding BillingMode Support to KCL 1.x. This enables the customer to specify if they want provisioned capacity for DDB, or pay per request.
|
||||||
|
* [PR #656](https://github.com/awslabs/amazon-kinesis-client/pull/656)
|
||||||
|
* Ensure ShardSyncTask invocation from ShardSyncTaskManager for pending ShardEnd events.
|
||||||
|
* [PR #659](https://github.com/awslabs/amazon-kinesis-client/pull/659)
|
||||||
|
* Fix the LeaseManagementIntegrationTest failure.
|
||||||
|
* [PR #670](https://github.com/awslabs/amazon-kinesis-client/pull/670)
|
||||||
|
|
||||||
|
## Release 1.13.0 (November 5, 2019)
|
||||||
|
[Milestone#42](https://github.com/awslabs/amazon-kinesis-client/milestone/42)
|
||||||
|
* Handling completed and blocked tasks better during graceful shutdown
|
||||||
|
* [PR #640](https://github.com/awslabs/amazon-kinesis-client/pull/640)
|
||||||
|
|
||||||
|
## Release 1.12.0 (October 17, 2019)
|
||||||
|
[Milestone#41](https://github.com/awslabs/amazon-kinesis-client/milestone/41)
|
||||||
|
* Adding logging around shard end codepaths.
|
||||||
|
* [PR #585](https://github.com/awslabs/amazon-kinesis-client/pull/585)
|
||||||
|
* Updating checkpointing failure message to refer to javadocs.
|
||||||
|
* [PR #590](https://github.com/awslabs/amazon-kinesis-client/pull/590)
|
||||||
|
* Updating Sonatype to dedicated AWS endpoint.
|
||||||
|
* [PR #618](https://github.com/awslabs/amazon-kinesis-client/pull/618)
|
||||||
|
* Introducing a validation step to verify if ShardEnd is reached, to prevent shard consumer stuck scenarios in the event of malformed response from service.
|
||||||
|
* [PR #623](https://github.com/awslabs/amazon-kinesis-client/pull/623)
|
||||||
|
* Updating AWS SDK to 1.11.655
|
||||||
|
* [PR #626](https://github.com/awslabs/amazon-kinesis-client/pull/626)
|
||||||
|
|
||||||
|
## Release 1.11.2 (August 15, 2019)
|
||||||
|
[Milestone#35](https://github.com/awslabs/amazon-kinesis-client/milestone/35)
|
||||||
|
* Added support for metrics emission in `PeriodicShardSyncer`.
|
||||||
|
* [PR #592](https://github.com/awslabs/amazon-kinesis-client/pull/592)
|
||||||
|
|
||||||
|
## Release 1.11.1 (August 9, 2019)
|
||||||
|
[Milestone#34](https://github.com/awslabs/amazon-kinesis-client/milestone/34)
|
||||||
|
* Updated the version of the AWS Java SDK to 1.11.603.
|
||||||
|
* [PR #587](https://github.com/awslabs/amazon-kinesis-client/pull/587)
|
||||||
|
* Added logging to `KinesisDataFetcher` when reaching the end of a shard due to a null next iterator.
|
||||||
|
* [PR #585](https://github.com/awslabs/amazon-kinesis-client/pull/585)
|
||||||
|
|
||||||
|
## Release 1.11.0 (August 7, 2019)
|
||||||
|
[Milestone#33](https://github.com/awslabs/amazon-kinesis-client/milestone/33)
|
||||||
|
* Improved exception handling and logging in `KinesisClientLibLeaseCoordinator` to avoid `NullPointerExceptions` when no leases are found.
|
||||||
|
* [PR #558](https://github.com/awslabs/amazon-kinesis-client/pull/558)
|
||||||
|
* Introducing optional new periodic shard sync strategy to perform shard discovery and lease cleanup on a single worker.
|
||||||
|
* [PR #579](https://github.com/awslabs/amazon-kinesis-client/pull/579)
|
||||||
|
|
||||||
|
## Release 1.10.0 (April 8, 2019)
|
||||||
|
[Milestone#31](https://github.com/awslabs/amazon-kinesis-client/milestone/31)
|
||||||
|
* Updated License to [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
* [PR#522](https://github.com/awslabs/amazon-kinesis-client/pull/522)
|
||||||
|
## Release 1.9.3 (October 30, 2018)
|
||||||
|
* Upgraded Apache Commons Lang from 2.6 to 3.7.
|
||||||
|
* [Issue #370](https://github.com/awslabs/amazon-kinesis-client/issues/370)
|
||||||
|
* [PR #406](https://github.com/awslabs/amazon-kinesis-client/pull/406)
|
||||||
|
* Upgraded Google Guava from 10.0 to 26.0-jre.
|
||||||
|
* [Issue #416](https://github.com/awslabs/amazon-kinesis-client/issues/416)
|
||||||
|
* [PR #421](https://github.com/awslabs/amazon-kinesis-client/pull/421)
|
||||||
|
|
||||||
|
## Release 1.9.2 (September 4, 2018)
|
||||||
|
* Allow use of Immutable Clients
|
||||||
|
* [Issue #280](https://github.com/awslabs/amazon-kinesis-client/issues/280)
|
||||||
|
* [PR #305](https://github.com/awslabs/amazon-kinesis-client/pull/305)
|
||||||
|
* Allow the use of `AT_TIMESTAMP` for MultiLang Daemon Clients.
|
||||||
|
* [Issue #341](https://github.com/awslabs/amazon-kinesis-client/issues/341)
|
||||||
|
* [PR #342](https://github.com/awslabs/amazon-kinesis-client/pull/342)
|
||||||
|
* Update the cache for `KinesisProxy#getShard` on cache misses.
|
||||||
|
* [PR #344](https://github.com/awslabs/amazon-kinesis-client/pull/344)
|
||||||
|
* Changed release process to use a standard process.
|
||||||
|
* [PR #389](https://github.com/awslabs/amazon-kinesis-client/pull/389)
|
||||||
|
* Removed tests that expected a null region response for unknown regions.
|
||||||
|
* [PR #346](https://github.com/awslabs/amazon-kinesis-client/pull/346)
|
||||||
|
* Updated the version of the AWS Java SDK to 1.11.400
|
||||||
|
|
||||||
|
## Release 1.9.1 (April 30, 2018)
|
||||||
|
* Added the ability to create a prepared checkpoint when at `SHARD_END`.
|
||||||
|
* [PR #301](https://github.com/awslabs/amazon-kinesis-client/pull/301)
|
||||||
|
* Added the ability to subscribe to worker state change events.
|
||||||
|
* [PR #291](https://github.com/awslabs/amazon-kinesis-client/pull/291)
|
||||||
|
* Added support for custom lease managers.
|
||||||
|
A custom `LeaseManager` can be provided to `Worker.Builder` that will be used to provide lease services.
|
||||||
|
This makes it possible to implement custom lease management systems in addition to the default DynamoDB system.
|
||||||
|
* [PR #297](https://github.com/awslabs/amazon-kinesis-client/pull/297)
|
||||||
|
* Updated the version of the AWS Java SDK to 1.11.219
|
||||||
|
|
||||||
|
## Release 1.9.0 (February 6, 2018)
|
||||||
|
* Introducing support for ListShards API. This API is used in place of DescribeStream API to provide more throughput during ShardSyncTask. Please consult the [AWS Documentation for ListShards](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListShards.html) for more information.
|
||||||
|
* ListShards supports higher call rate, which should reduce instances of throttling when attempting to synchronize the shard list.
|
||||||
|
* __WARNING: `ListShards` is a new API, and may require updating any explicit IAM policies__
|
||||||
|
* Added configuration parameters for ListShards usage
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ---- | ------- | ----------- |
|
||||||
|
| [listShardsBackoffTimeInMillis](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1412) | 1500 ms | This is the default backoff time between 2 ListShards calls when throttled. |
|
||||||
|
| [listShardsRetryAttempts](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1423) | 50 | This is the maximum number of times the KinesisProxy will retry to make ListShards calls on being throttled. |
|
||||||
|
|
||||||
|
* Updating the version of AWS Java SDK to 1.11.272.
|
||||||
|
* Version 1.11.272 is now the minimum support version of the SDK.
|
||||||
|
* Deprecating the following methods, and classes. These methods, and classes will be removed in a future release.
|
||||||
|
* Deprecated [IKinesisProxy#getStreamInfo](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxy.java#L48-L62).
|
||||||
|
* Deprecated [IKinesisProxyFactory](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyFactory.java).
|
||||||
|
* Deprecated [KinesisProxyFactory](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyFactory.java).
|
||||||
|
* Deprecated certain [KinesisProxy](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxy.java) constructors.
|
||||||
|
* [PR #293](https://github.com/awslabs/amazon-kinesis-client/pull/293)
|
||||||
|
|
||||||
|
## Release 1.8.10
|
||||||
|
* Allow providing a custom IKinesisProxy implementation.
|
||||||
|
* [PR #274](https://github.com/awslabs/amazon-kinesis-client/pull/274)
|
||||||
|
* Checkpointing on a different thread should no longer emit a warning about NullMetricsScope.
|
||||||
|
* [PR #284](https://github.com/awslabs/amazon-kinesis-client/pull/284)
|
||||||
|
* [Issue #48](https://github.com/awslabs/amazon-kinesis-client/issues/48)
|
||||||
|
* Upgraded the AWS Java SDK to version 1.11.271
|
||||||
|
* [PR #287](https://github.com/awslabs/amazon-kinesis-client/pull/287)
|
||||||
|
|
||||||
|
## Release 1.8.9
|
||||||
|
* Allow disabling check for the case where a child shard has an open parent shard.
|
||||||
|
There is a race condition where it's possible for the a parent shard to appear open, while having child shards. This check can now be disabled by setting [`ignoreUnexpectedChildShards`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1037) to true.
|
||||||
|
* [PR #240](https://github.com/awslabs/amazon-kinesis-client/pull/240)
|
||||||
|
* [Issue #210](https://github.com/awslabs/amazon-kinesis-client/issues/210)
|
||||||
|
* Upgraded the AWS SDK for Java to 1.11.261
|
||||||
|
* [PR #281](https://github.com/awslabs/amazon-kinesis-client/pull/281)
|
||||||
|
|
||||||
|
## Release 1.8.8
|
||||||
|
* Fixed issues with leases losses due to `ExpiredIteratorException` in `PrefetchGetRecordsCache` and `AsynchronousFetchingStrategy`.
|
||||||
|
PrefetchGetRecordsCache will request for a new iterator and start fetching data again.
|
||||||
|
* [PR#263](https://github.com/awslabs/amazon-kinesis-client/pull/263)
|
||||||
|
* Added warning message for long running tasks.
|
||||||
|
Logging long running tasks can be enabled by setting the following configuration property:
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ---- | ------- | ----------- |
|
||||||
|
| [`logWarningForTaskAfterMillis`](https://github.com/awslabs/amazon-kinesis-client/blob/3de901ea9327370ed732af86c4d4999c8d99541c/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1367) | Not set | Milliseconds after which the logger will log a warning message for the long running task |
|
||||||
|
|
||||||
|
* [PR#259](https://github.com/awslabs/amazon-kinesis-client/pull/259)
|
||||||
|
* Handling spurious lease renewal failures gracefully.
|
||||||
|
Added better handling of DynamoDB failures when updating leases. These failures would occur when a request to DynamoDB appeared to fail, but was actually successful.
|
||||||
|
* [PR#247](https://github.com/awslabs/amazon-kinesis-client/pull/247)
|
||||||
|
* ShutdownTask gets retried if the previous attempt on the ShutdownTask fails.
|
||||||
|
* [PR#267](https://github.com/awslabs/amazon-kinesis-client/pull/267)
|
||||||
|
* Fix for using maxRecords from `KinesisClientLibConfiguration` in `GetRecordsCache` for fetching records.
|
||||||
|
* [PR#264](https://github.com/awslabs/amazon-kinesis-client/pull/264)
|
||||||
|
|
||||||
|
## Release 1.8.7
|
||||||
|
* Don't add a delay for synchronous requests to Kinesis
|
||||||
|
Removes a delay that had been added for synchronous `GetRecords` calls to Kinesis.
|
||||||
|
* [PR #256](https://github.com/awslabs/amazon-kinesis-client/pull/256)
|
||||||
|
|
||||||
|
## Release 1.8.6
|
||||||
|
* Add prefetching of records from Kinesis
|
||||||
|
Prefetching will retrieve and queue additional records from Kinesis while the application is processing existing records.
|
||||||
|
Prefetching can be enabled by setting [`dataFetchingStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1317) to `PREFETCH_CACHED`. Once enabled an additional fetching thread will be started to retrieve records from Kinesis. Retrieved records will be held in a queue until the application is ready to process them.
|
||||||
|
Pre-fetching supports the following configuration values:
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ---- | ------- | ----------- |
|
||||||
|
| [`dataFetchingStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1317) | `DEFAULT` | Which data fetching strategy to use |
|
||||||
|
| [`maxPendingProcessRecordsInput`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1296) | 3 | The maximum number of process records input that can be queued |
|
||||||
|
| [`maxCacheByteSize`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1307) | 8 MiB | The maximum number of bytes that can be queued |
|
||||||
|
| [`maxRecordsCount`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1326) | 30,000 | The maximum number of records that can be queued |
|
||||||
|
| [`idleMillisBetweenCalls`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1353) | 1,500 ms | The amount of time to wait between calls to Kinesis |
|
||||||
|
|
||||||
|
* [PR #246](https://github.com/awslabs/amazon-kinesis-client/pull/246)
|
||||||
|
|
||||||
|
## Release 1.8.5 (September 26, 2017)
|
||||||
|
* Only advance the shard iterator for the accepted response.
|
||||||
|
This fixes a race condition in the `KinesisDataFetcher` when it's being used to make asynchronous requests. The shard iterator is now only advanced when the retriever calls `DataFetcherResult#accept()`.
|
||||||
|
* [PR #230](https://github.com/awslabs/amazon-kinesis-client/pull/230)
|
||||||
|
* [Issue #231](https://github.com/awslabs/amazon-kinesis-client/issues/231)
|
||||||
|
|
||||||
|
## Release 1.8.4 (September 22, 2017)
|
||||||
|
* Create a new completion service for each request.
|
||||||
|
This ensures that canceled tasks are discarded. This will prevent a cancellation exception causing issues processing records.
|
||||||
|
* [PR #227](https://github.com/awslabs/amazon-kinesis-client/pull/227)
|
||||||
|
* [Issue #226](https://github.com/awslabs/amazon-kinesis-client/issues/226)
|
||||||
|
|
||||||
|
## Release 1.8.3 (September 22, 2017)
|
||||||
|
* Call shutdown on the retriever when the record processor is being shutdown
|
||||||
|
This fixes a bug that could leak threads if using the [`AsynchronousGetRecordsRetrievalStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/9a82b6bd05b3c9c5f8581af007141fa6d5f0fc4e/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/AsynchronousGetRecordsRetrievalStrategy.java#L42) is being used.
|
||||||
|
The asynchronous retriever is only used when [`KinesisClientLibConfiguration#retryGetRecordsInSeconds`](https://github.com/awslabs/amazon-kinesis-client/blob/01d2688bc6e68fd3fe5cb698cb65299d67ac930d/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L227), and [`KinesisClientLibConfiguration#maxGetRecordsThreadPool`](https://github.com/awslabs/amazon-kinesis-client/blob/01d2688bc6e68fd3fe5cb698cb65299d67ac930d/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L230) are set.
|
||||||
|
* [PR #222](https://github.com/awslabs/amazon-kinesis-client/pull/222)
|
||||||
|
|
||||||
|
## Release 1.8.2 (September 20, 2017)
|
||||||
|
* Add support for two phase checkpoints
|
||||||
|
Applications can now set a pending checkpoint, before completing the checkpoint operation. Once the application has completed its checkpoint steps, the final checkpoint will clear the pending checkpoint.
|
||||||
|
Should the checkpoint fail the attempted sequence number is provided in the [`InitializationInput#getPendingCheckpointSequenceNumber`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/InitializationInput.java#L81) otherwise the value will be null.
|
||||||
|
* [PR #188](https://github.com/awslabs/amazon-kinesis-client/pull/188)
|
||||||
|
* Support timeouts, and retry for GetRecords calls.
|
||||||
|
Applications can now set timeouts for GetRecord calls to Kinesis. As part of setting the timeout, the application must also provide a thread pool size for concurrent requests.
|
||||||
|
* [PR #214](https://github.com/awslabs/amazon-kinesis-client/pull/214)
|
||||||
|
* Notification when the lease table is throttled
|
||||||
|
When writes or reads to the lease table are throttled, a warning is emitted. If you're seeing this warning, you should increase the provisioned IOPS for your lease table to prevent processing delays.
|
||||||
|
* [PR #212](https://github.com/awslabs/amazon-kinesis-client/pull/212)
|
||||||
|
* Support configuring the graceful shutdown timeout for MultiLang Clients
|
||||||
|
This adds support for setting how long the Java process will wait for the MultiLang client to complete a graceful shutdown. The timeout can be configured by adding `shutdownGraceMillis` to the properties file, set to the number of milliseconds to wait.
|
||||||
|
* [PR #204](https://github.com/awslabs/amazon-kinesis-client/pull/204)
|
||||||
|
|
||||||
|
## Release 1.8.1 (August 2, 2017)
|
||||||
|
* Support timeouts for calls to the MultiLang Daemon
|
||||||
|
This adds support for setting a timeout when dispatching records to the client record processor. If the record processor doesn't respond within the timeout, the parent Java process will be terminated. This is a temporary fix to handle cases where the KCL becomes blocked while waiting for a client record processor.
|
||||||
|
The timeout can be set by adding `timeoutInSeconds = <timeout value>` to the properties file. By default no timeout is set.
|
||||||
|
__Setting this can cause the KCL to exit suddenly. Before using this, ensure that you have an automated restart for your application.__
|
||||||
|
* [PR #195](https://github.com/awslabs/amazon-kinesis-client/pull/195)
|
||||||
|
* [Issue #185](https://github.com/awslabs/amazon-kinesis-client/issues/185)
|
||||||
|
|
||||||
|
## Release 1.8.0 (July 25, 2017)
|
||||||
|
* Execute graceful shutdown on its own thread
|
||||||
|
* [PR #191](https://github.com/awslabs/amazon-kinesis-client/pull/191)
|
||||||
|
* [Issue #167](https://github.com/awslabs/amazon-kinesis-client/issues/167)
|
||||||
|
* Added support for controlling the size of the lease renewer thread pool
|
||||||
|
* [PR #177](https://github.com/awslabs/amazon-kinesis-client/pull/177)
|
||||||
|
* [Issue #171](https://github.com/awslabs/amazon-kinesis-client/issues/171)
|
||||||
|
* Require Java 8 or later
|
||||||
|
__Java 8 is now required for version 1.8.0 and later of the amazon-kinesis-client.__
|
||||||
|
* [PR #176](https://github.com/awslabs/amazon-kinesis-client/issues/176)
|
||||||
|
|
||||||
|
## Release 1.7.6 (June 21, 2017)
|
||||||
|
* Added support for graceful shutdown in MultiLang Clients
|
||||||
|
* [PR #174](https://github.com/awslabs/amazon-kinesis-client/pull/174)
|
||||||
|
* [PR #182](https://github.com/awslabs/amazon-kinesis-client/pull/182)
|
||||||
|
* Updated documentation for `v2.IRecordProcessor#shutdown`, and `KinesisClientLibConfiguration#idleTimeBetweenReadsMillis`
|
||||||
|
* [PR #170](https://github.com/awslabs/amazon-kinesis-client/pull/170)
|
||||||
|
* Updated to version 1.11.151 of the AWS Java SDK
|
||||||
|
* [PR #183](https://github.com/awslabs/amazon-kinesis-client/pull/183)
|
||||||
|
|
||||||
|
## Release 1.7.5 (April 7, 2017)
|
||||||
|
* Correctly handle throttling for DescribeStream, and save accumulated progress from individual calls.
|
||||||
|
* [PR #152](https://github.com/awslabs/amazon-kinesis-client/pull/152)
|
||||||
|
* Upgrade to version 1.11.115 of the AWS Java SDK
|
||||||
|
* [PR #155](https://github.com/awslabs/amazon-kinesis-client/pull/155)
|
||||||
|
|
||||||
|
## Release 1.7.4 (February 27, 2017)
|
||||||
|
* Fixed an issue building JavaDoc for Java 8.
|
||||||
|
* [Issue #18](https://github.com/awslabs/amazon-kinesis-client/issues/18)
|
||||||
|
* [PR #141](https://github.com/awslabs/amazon-kinesis-client/pull/141)
|
||||||
|
* Reduce Throttling Messages to WARN, unless throttling occurs 6 times consecutively.
|
||||||
|
* [Issue #4](https://github.com/awslabs/amazon-kinesis-client/issues/4)
|
||||||
|
* [PR #140](https://github.com/awslabs/amazon-kinesis-client/pull/140)
|
||||||
|
* Fixed two bugs occurring in requestShutdown.
|
||||||
|
* Fixed a bug that prevented the worker from shutting down, via requestShutdown, when no leases were held.
|
||||||
|
* [Issue #128](https://github.com/awslabs/amazon-kinesis-client/issues/128)
|
||||||
|
* Fixed a bug that could trigger a NullPointerException if leases changed during requestShutdown.
|
||||||
|
* [Issue #129](https://github.com/awslabs/amazon-kinesis-client/issues/129)
|
||||||
|
* [PR #139](https://github.com/awslabs/amazon-kinesis-client/pull/139)
|
||||||
|
* Upgraded the AWS SDK Version to 1.11.91
|
||||||
|
* [PR #138](https://github.com/awslabs/amazon-kinesis-client/pull/138)
|
||||||
|
* Use an executor returned from `Executors.newFixedThreadPool` instead of constructing it by hand.
|
||||||
|
* [PR #135](https://github.com/awslabs/amazon-kinesis-client/pull/135)
|
||||||
|
* Correctly initialize the DynamoDB client when an endpoint is explicitly set.
|
||||||
|
* [PR #142](https://github.com/awslabs/amazon-kinesis-client/pull/142)
|
||||||
|
|
||||||
|
## Release 1.7.3 (January 9, 2017)
|
||||||
|
* Upgrade to the newest AWS Java SDK.
|
||||||
|
* [Amazon Kinesis Client Issue #27](https://github.com/awslabs/amazon-kinesis-client-python/issues/27)
|
||||||
|
* [PR #126](https://github.com/awslabs/amazon-kinesis-client/pull/126)
|
||||||
|
* [PR #125](https://github.com/awslabs/amazon-kinesis-client/pull/125)
|
||||||
|
* Added a direct dependency on commons-logging.
|
||||||
|
* [Issue #123](https://github.com/awslabs/amazon-kinesis-client/issues/123)
|
||||||
|
* [PR #124](https://github.com/awslabs/amazon-kinesis-client/pull/124)
|
||||||
|
* Make ShardInfo public to allow for custom ShardPrioritization strategies.
|
||||||
|
* [Issue #120](https://github.com/awslabs/amazon-kinesis-client/issues/120)
|
||||||
|
* [PR #127](https://github.com/awslabs/amazon-kinesis-client/pull/127)
|
||||||
|
|
||||||
|
## Release 1.7.2 (November 7, 2016)
|
||||||
|
* MultiLangDaemon Feature Updates
|
||||||
|
The MultiLangDaemon has been upgraded to use the v2 interfaces, which allow access to enhanced checkpointing and more information during record processor initialization. MultiLangDaemon clients must be updated before they can take advantage of these new features.
|
||||||
|
|
||||||
|
## Release 1.7.1 (November 3, 2016)
|
||||||
|
* General
|
||||||
|
* Allow disabling shard synchronization at startup.
|
||||||
|
* Applications can disable shard synchronization at startup. Disabling shard synchronization can reduce application startup times for very large streams (see the configuration sketch below).
|
||||||
|
* [PR #102](https://github.com/awslabs/amazon-kinesis-client/pull/102)
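A minimal sketch of opting out of the startup shard sync, assuming a `withSkipShardSyncAtWorkerInitializationIfLeasesExist` setter added for this feature (the setter name is an assumption):

``` java
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;

public class SkipShardSyncSketch {
    public static KinesisClientLibConfiguration configure() {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                // Assumed setter: skip the startup shard sync when leases already exist in the lease table.
                .withSkipShardSyncAtWorkerInitializationIfLeasesExist(true);
    }
}
```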
|
||||||
|
* Applications can now request a graceful shutdown, and record processors that implement the `IShutdownNotificationAware` interface will be given a chance to checkpoint before being shut down (see the sketch below).
|
||||||
|
* This adds a [new interface](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java), and a [new method on Worker](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java#L539).
|
||||||
|
* [PR #109](https://github.com/awslabs/amazon-kinesis-client/pull/109)
|
||||||
|
* Solves [Issue #79](https://github.com/awslabs/amazon-kinesis-client/issues/79)
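A minimal sketch of a v2 record processor that opts into the graceful-shutdown notification; the `shutdownRequested` callback signature is assumed from the linked interface:

``` java
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;

public class GracefulShutdownAwareProcessor implements IRecordProcessor, IShutdownNotificationAware {
    @Override
    public void initialize(InitializationInput initializationInput) { }

    @Override
    public void processRecords(ProcessRecordsInput processRecordsInput) { }

    @Override
    public void shutdown(ShutdownInput shutdownInput) { }

    // Called when a graceful shutdown is requested, before the lease is released.
    @Override
    public void shutdownRequested(IRecordProcessorCheckpointer checkpointer) {
        try {
            checkpointer.checkpoint(); // last chance to checkpoint before shutdown
        } catch (Exception e) {
            // sketch only: real code should handle checkpoint exceptions explicitly
        }
    }
}
```

The application triggers this path by calling `requestShutdown()` on its `Worker` instance (the new method linked above).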
|
||||||
|
* MultiLangDaemon
|
||||||
|
* Applications can now use credential providers that accept string parameters.
|
||||||
|
* [PR #99](https://github.com/awslabs/amazon-kinesis-client/pull/99)
|
||||||
|
* Applications can now use different credentials for each service.
|
||||||
|
* [PR #111](https://github.com/awslabs/amazon-kinesis-client/pull/111)
|
||||||
|
|
||||||
|
## Release 1.7.0 (August 22, 2016)
|
||||||
|
* Add support for time based iterators ([See GetShardIterator Documentation](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html))
|
||||||
|
* [PR #94](https://github.com/awslabs/amazon-kinesis-client/pull/94)
|
||||||
|
The `KinesisClientLibConfiguration` now supports providing an initial timestamp position (see the configuration sketch below).
|
||||||
|
* This position is only used if there is no current checkpoint for the shard.
|
||||||
|
* This setting cannot be used with DynamoDB Streams
|
||||||
|
Resolves [Issue #88](https://github.com/awslabs/amazon-kinesis-client/issues/88)
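A minimal sketch of starting from a point in time, assuming a `withTimestampAtInitialPositionInStream(Date)` setter accompanies this feature:

``` java
import java.util.Date;

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;

public class TimestampPositionSketch {
    public static KinesisClientLibConfiguration configure() {
        Date oneHourAgo = new Date(System.currentTimeMillis() - 3600_000L);
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                // Assumed setter: only applies to shards that have no checkpoint yet.
                .withTimestampAtInitialPositionInStream(oneHourAgo);
    }
}
```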
|
||||||
|
* Allow Prioritization of Parent Shards for Task Assignment
|
||||||
|
* [PR #95](https://github.com/awslabs/amazon-kinesis-client/pull/95)
|
||||||
|
The `KinesisClientLibConfiguration` now supports providing a `ShardPrioritization` strategy. This strategy controls how the `Worker` determines which `ShardConsumer` to call next. This can improve processing for streams that split often, such as DynamoDB Streams.
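A minimal sketch of plugging in a prioritization strategy; the `withShardPrioritizationStrategy` setter and the `ParentsFirstShardPrioritization(maxDepth)` constructor are assumptions based on these notes:

``` java
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ParentsFirstShardPrioritization;

public class ShardPrioritizationSketch {
    public static KinesisClientLibConfiguration configure() {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                // Assumed setter and constructor: process parent shards up to the given depth
                // before their children are picked up.
                .withShardPrioritizationStrategy(new ParentsFirstShardPrioritization(1));
    }
}
```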
|
||||||
|
* Remove the direct dependency on `aws-java-sdk-core` to allow independent versioning.
|
||||||
|
* [PR #92](https://github.com/awslabs/amazon-kinesis-client/pull/92)
|
||||||
|
**You may need to add a direct dependency on aws-java-sdk-core if other dependencies include an older version.**
|
||||||
|
|
||||||
|
## Release 1.6.5 (July 25, 2016)
|
||||||
|
* Change LeaseManager to call DescribeTable before attempting to create the lease table.
|
||||||
|
* [Issue #36](https://github.com/awslabs/amazon-kinesis-client/issues/36)
|
||||||
|
* [PR #41](https://github.com/awslabs/amazon-kinesis-client/pull/41)
|
||||||
|
* [PR #67](https://github.com/awslabs/amazon-kinesis-client/pull/67)
|
||||||
|
* Allow DynamoDB lease table name to be specified
|
||||||
|
* [PR #61](https://github.com/awslabs/amazon-kinesis-client/pull/61)
|
||||||
|
* Add approximateArrivalTimestamp for JsonFriendlyRecord
|
||||||
|
* [PR #86](https://github.com/awslabs/amazon-kinesis-client/pull/86)
|
||||||
|
* Shut down the lease renewal thread pool on exit.
|
||||||
|
* [PR #84](https://github.com/awslabs/amazon-kinesis-client/pull/84)
|
||||||
|
* Wait for CloudWatch publishing thread to finish before exiting.
|
||||||
|
* [PR #82](https://github.com/awslabs/amazon-kinesis-client/pull/82)
|
||||||
|
* Added unit and integration tests for the library.
|
||||||
|
|
||||||
|
## Release 1.6.4 (July 6, 2016)
|
||||||
|
* Upgrade to AWS SDK for Java 1.11.14
|
||||||
|
* [Issue #74](https://github.com/awslabs/amazon-kinesis-client/issues/74)
|
||||||
|
* [Issue #73](https://github.com/awslabs/amazon-kinesis-client/issues/73)
|
||||||
|
* **Maven Artifact Signing Change**
|
||||||
|
* Artifacts are now signed by the identity `Amazon Kinesis Tools <amazon-kinesis-tools@amazon.com>`
|
||||||
|
|
||||||
|
## Release 1.6.3 (May 12, 2016)
|
||||||
|
* Fix a format exception caused by a DEBUG log in LeaseTaker. [Issue #68](https://github.com/awslabs/amazon-kinesis-client/issues/68)
|
||||||
|
|
||||||
|
## Release 1.6.2 (March 23, 2016)
|
||||||
|
* Support for specifying max leases per worker and max leases to steal at a time (see the configuration sketch after this list).
|
||||||
|
* Support for specifying initial DynamoDB table read and write capacity.
|
||||||
|
* Support for parallel lease renewal.
|
||||||
|
* Support for graceful worker shutdown.
|
||||||
|
* Change DefaultCWMetricsPublisher log level to debug. [PR #49](https://github.com/awslabs/amazon-kinesis-client/pull/49)
|
||||||
|
* Avoid NPE in MLD record processor shutdown if record processor was not initialized. [Issue #29](https://github.com/awslabs/amazon-kinesis-client/issues/29)
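A minimal configuration sketch for the lease-related settings introduced in this release, assuming the corresponding `KinesisClientLibConfiguration` setters (the setter names are assumptions):

``` java
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;

public class LeaseTuningSketch {
    public static KinesisClientLibConfiguration configure() {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                .withMaxLeasesForWorker(20)           // cap on leases held by a single worker
                .withMaxLeasesToStealAtOneTime(2)     // how aggressively leases are rebalanced
                .withInitialLeaseTableReadCapacity(10)
                .withInitialLeaseTableWriteCapacity(10);
    }
}
```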
|
||||||
|
|
||||||
|
## Release 1.6.1 (September 23, 2015)
|
||||||
|
* Expose [approximateArrivalTimestamp](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) for Records in the processRecords API call.
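A minimal sketch of reading the new field off a record received in `processRecords`:

``` java
import java.util.Date;

import com.amazonaws.services.kinesis.model.Record;

public class ArrivalTimestampSketch {
    public void logArrival(Record record) {
        // Approximate time at which the record was received by the stream.
        Date arrival = record.getApproximateArrivalTimestamp();
        System.out.println("approximateArrivalTimestamp=" + arrival);
    }
}
```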
|
||||||
|
|
||||||
|
## Release 1.6.0 (July 31, 2015)
|
||||||
|
* Restores compatibility with [dynamodb-streams-kinesis-adapter](https://github.com/awslabs/dynamodb-streams-kinesis-adapter) (which was broken in 1.4.0).
|
||||||
|
|
||||||
|
## Release 1.5.1 (July 20, 2015)
|
||||||
|
* The KCL Maven artifact 1.5.0 does not work with JDK 7. This release addresses that issue.
|
||||||
|
|
||||||
|
## Release 1.5.0 (July 9, 2015)
|
||||||
|
* **[Metrics Enhancements][kinesis-guide-monitoring-with-kcl]**
|
||||||
|
* Support metrics level and dimension configurations to control CloudWatch metrics emitted by the KCL.
|
||||||
|
* Add new metrics that track time spent in record processor methods.
|
||||||
|
* Disable WorkerIdentifier dimension by default.
|
||||||
|
* **Exception Reporting** — Do not silently ignore exceptions in ShardConsumer.
|
||||||
|
* **AWS SDK Component Dependencies** — Depend only on AWS SDK components that are used.
|
||||||
|
|
||||||
|
## Release 1.4.0 (June 2, 2015)
|
||||||
|
* Integration with the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**
|
||||||
|
* Automatically de-aggregate records put into the Kinesis stream using the KPL.
|
||||||
|
* Support checkpointing at the individual user record level when multiple user records are aggregated into one Kinesis record using the KPL.
|
||||||
|
|
||||||
|
See [Consumer De-aggregation with the KCL][kinesis-guide-consumer-deaggregation] for details.
|
||||||
|
|
||||||
|
## Release 1.3.0 (May 22, 2015)
|
||||||
|
* A new metric called "MillisBehindLatest", which tracks how far consumers are from real time, is now uploaded to CloudWatch.
|
||||||
|
|
||||||
|
## Release 1.2.1 (January 26, 2015)
|
||||||
|
* **MultiLangDaemon** — Changes to the MultiLangDaemon to make it easier to provide a custom worker.
|
||||||
|
|
||||||
|
## Release 1.2 (October 21, 2014)
|
||||||
|
* **Multi-Language Support** — Amazon KCL now supports implementing record processors in any language by communicating with the daemon over [STDIN and STDOUT][multi-lang-protocol]. Python developers can directly use the [Amazon Kinesis Client Library for Python][kclpy] to write their data processing applications.
|
||||||
|
|
||||||
|
## Release 1.1 (June 30, 2014)
|
||||||
|
* **Checkpointing at a specific sequence number** — The IRecordProcessorCheckpointer interface now supports checkpointing at a sequence number specified by the record processor (see the sketch after this list).
|
||||||
|
* **Set region** — KinesisClientLibConfiguration now supports setting the region name to indicate the location of the Amazon Kinesis service. The Amazon DynamoDB table and Amazon CloudWatch metrics associated with your application will also use this region setting.
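A minimal sketch illustrating the two items above; the `checkpoint(String)` overload and `withRegionName` setter are assumed names for the capabilities described:

``` java
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.model.Record;

public class SequenceNumberCheckpointSketch {
    public void checkpointAt(IRecordProcessorCheckpointer checkpointer, Record record) throws Exception {
        // Checkpoint exactly at this record rather than at the latest processed position.
        checkpointer.checkpoint(record.getSequenceNumber());
    }

    public static KinesisClientLibConfiguration withRegion() {
        return new KinesisClientLibConfiguration(
                        "my-app", "my-stream", new DefaultAWSCredentialsProviderChain(), "worker-1")
                // The DynamoDB lease table and CloudWatch metrics use this region as well.
                .withRegionName("us-west-2");
    }
}
```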
|
||||||
|
|
||||||
|
[kinesis]: http://aws.amazon.com/kinesis
|
||||||
|
[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169
|
||||||
|
[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues
|
||||||
|
[docs-signup]: http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-setup.html
|
||||||
|
[kinesis-guide]: http://docs.aws.amazon.com/kinesis/latest/dev/introduction.html
|
||||||
|
[kinesis-guide-begin]: http://docs.aws.amazon.com/kinesis/latest/dev/before-you-begin.html
|
||||||
|
[kinesis-guide-create]: http://docs.aws.amazon.com/kinesis/latest/dev/step-one-create-stream.html
|
||||||
|
[kinesis-guide-applications]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-app.html
|
||||||
|
[kinesis-guide-monitoring-with-kcl]: http://docs.aws.amazon.com//kinesis/latest/dev/monitoring-with-kcl.html
|
||||||
|
[kinesis-guide-kpl]: http://docs.aws.amazon.com//kinesis/latest/dev/developing-producers-with-kpl.html
|
||||||
|
[kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html
|
||||||
|
[kclpy]: https://github.com/awslabs/amazon-kinesis-client-python
|
||||||
|
[multi-lang-protocol]: https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@ Manifest-Version: 1.0
|
||||||
Bundle-ManifestVersion: 2
|
Bundle-ManifestVersion: 2
|
||||||
Bundle-Name: Amazon Kinesis Client Library for Java
|
Bundle-Name: Amazon Kinesis Client Library for Java
|
||||||
Bundle-SymbolicName: com.amazonaws.kinesisclientlibrary;singleton:=true
|
Bundle-SymbolicName: com.amazonaws.kinesisclientlibrary;singleton:=true
|
||||||
Bundle-Version: 2.0.0
|
Bundle-Version: 1.9.1
|
||||||
Bundle-Vendor: Amazon Technologies, Inc
|
Bundle-Vendor: Amazon Technologies, Inc
|
||||||
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
|
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
|
||||||
Require-Bundle: org.apache.commons.codec;bundle-version="1.6",
|
Require-Bundle: org.apache.commons.codec;bundle-version="1.6",
|
||||||
|
|
@ -15,13 +15,13 @@ Require-Bundle: org.apache.commons.codec;bundle-version="1.6",
|
||||||
com.amazonaws.sdk;bundle-version="1.11.319",
|
com.amazonaws.sdk;bundle-version="1.11.319",
|
||||||
Export-Package: com.amazonaws.services.kinesis,
|
Export-Package: com.amazonaws.services.kinesis,
|
||||||
com.amazonaws.services.kinesis.clientlibrary,
|
com.amazonaws.services.kinesis.clientlibrary,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.kinesisClientLibConfiguration,
|
com.amazonaws.services.kinesis.clientlibrary.config,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.exceptions,
|
com.amazonaws.services.kinesis.clientlibrary.exceptions,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.exceptions.internal,
|
com.amazonaws.services.kinesis.clientlibrary.exceptions.internal,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.interfaces,
|
com.amazonaws.services.kinesis.clientlibrary.interfaces,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.lib,
|
com.amazonaws.services.kinesis.clientlibrary.lib,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint,
|
com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.lib.scheduler,
|
com.amazonaws.services.kinesis.clientlibrary.lib.worker,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.proxies,
|
com.amazonaws.services.kinesis.clientlibrary.proxies,
|
||||||
com.amazonaws.services.kinesis.clientlibrary.types,
|
com.amazonaws.services.kinesis.clientlibrary.types,
|
||||||
com.amazonaws.services.kinesis.leases,
|
com.amazonaws.services.kinesis.leases,
|
||||||
|
|
|
||||||
232
README.md
|
|
@ -1,59 +1,38 @@
|
||||||
# Amazon Kinesis Client Library for Java
|
|
||||||
[](https://travis-ci.org/awslabs/amazon-kinesis-client)
|
|
||||||
|
|
||||||
> [!IMPORTANT]
|
> [!IMPORTANT]
|
||||||
> ### Amazon Kinesis Client Library (KCL) 1.x will reach end-of-support on January 30, 2026
|
> # Kinesis Client Library (KCL) 1.x will reach end-of-support on January 30, 2026
|
||||||
> Amazon Kinesis Client Library (KCL) 1.x will reach end-of-support on January 30, 2026. Accordingly, these versions will enter maintenance mode on April 17, 2025. During maintenance mode, AWS will provide updates only for critical bug fixes and security issues. Major versions in maintenance mode will not receive updates for new features or feature enhancements. If you’re using KCL 1.x, we recommend migrating to the latest versions. When migrating from KCL 1.x to 3.x, you will need to update interfaces and security credential providers in your application. For details about the end-of-support notice and required actions, see the following links:
|
> Amazon Kinesis Client Library (KCL) 1.x will reach end-of-support on January 30, 2026. Accordingly, these versions will enter maintenance mode on April 17, 2025. During maintenance mode, AWS will provide updates only for critical bug fixes and security issues. Major versions in maintenance mode will not receive updates for new features or feature enhancements. If you’re using KCL 1.x, we recommend migrating to the latest versions. When migrating from KCL 1.x to 3.x, you will need to update interfaces and security credential providers in your application. For details about the end-of-support notice and required actions, see the following links:
|
||||||
|
>
|
||||||
> * [AWS Blog: Announcing end-of-support for Amazon Kinesis Client Library 1.x and Amazon Kinesis Producer Library 0.x effective January 30, 2026](https://aws.amazon.com/blogs/big-data/announcing-end-of-support-for-amazon-kinesis-client-library-1-x-and-amazon-kinesis-producer-library-0-x-effective-january-30-2026/)
|
> * [AWS Blog: Announcing end-of-support for Amazon Kinesis Client Library 1.x and Amazon Kinesis Producer Library 0.x effective January 30, 2026](https://aws.amazon.com/blogs/big-data/announcing-end-of-support-for-amazon-kinesis-client-library-1-x-and-amazon-kinesis-producer-library-0-x-effective-january-30-2026/)
|
||||||
> * [Kinesis documentation: KCL version lifecycle policy](https://docs.aws.amazon.com/streams/latest/dev/kcl-version-lifecycle-policy.html)
|
> * [Kinesis documentation: KCL version lifecycle policy](https://docs.aws.amazon.com/streams/latest/dev/kcl-version-lifecycle-policy.html)
|
||||||
> * [Kinesis documentation: Migrating from KCL 1.x to KCL 3.x](https://docs.aws.amazon.com/streams/latest/dev/kcl-migration-1-3.html)
|
> * [Kinesis documentation: Migrating from KCL 1.x to KCL 3.x](https://docs.aws.amazon.com/streams/latest/dev/kcl-migration-1-3.html)
|
||||||
|
|
||||||
## Introduction
|
# Bugs in 1.14.0 version
|
||||||
|
We recommend customers to migrate to 1.14.1 to avoid [known bugs](https://github.com/awslabs/amazon-kinesis-client/issues/778) in 1.14.0 version
|
||||||
|
|
||||||
The **Amazon Kinesis Client Library (KCL) for Java** enables Java developers to easily consume and process data from [Amazon Kinesis Data Streams][kinesis].
|
# Amazon Kinesis Client Library for Java
|
||||||
|
[](https://travis-ci.org/awslabs/amazon-kinesis-client) 
|
||||||
|
|
||||||
* [Kinesis Data Streams Product Page][kinesis]
|
The **Amazon Kinesis Client Library for Java** (Amazon KCL) enables Java developers to easily consume and process data from [Amazon Kinesis][kinesis].
|
||||||
* [Amazon re:Post Forum: Kinesis][kinesis-forum]
|
|
||||||
* [Javadoc][kcl-javadoc]
|
* [Kinesis Product Page][kinesis]
|
||||||
* [FAQ](docs/FAQ.md)
|
* [Forum][kinesis-forum]
|
||||||
* [Developer Guide - Kinesis Client Library][kcl-aws-doc]
|
|
||||||
* [KCL GitHub documentation](docs/) (folder)
|
|
||||||
* [Issues][kinesis-client-library-issues]
|
* [Issues][kinesis-client-library-issues]
|
||||||
* [Giving Feedback][giving-feedback]
|
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
* **Scalability:** KCL enables applications to scale dynamically by distributing the processing load across multiple workers. You can scale your application in or out, manually or with auto-scaling, without worrying about load redistribution.
|
* Provides an easy-to-use programming model for processing data using Amazon Kinesis
|
||||||
* **Load balancing:** KCL automatically balances the processing load across available workers, resulting in an even distribution of work across workers.
|
* Helps with scale-out and fault-tolerant processing
|
||||||
* **Checkpointing:** KCL manages checkpointing of processed records, enabling applications to resume processing from their last successfully processed position.
|
|
||||||
* **Fault tolerance:** KCL provides built-in fault tolerance mechanisms, making sure that data processing continues even if individual workers fail. KCL also provides at-least-once delivery.
|
|
||||||
* **Handling stream-level changes:** KCL adapts to shard splits and merges that might occur due to changes in data volume. It maintains ordering by making sure that child shards are processed only after their parent shard is completed and checkpointed.
|
|
||||||
* **Monitoring:** KCL integrates with Amazon CloudWatch for consumer-level monitoring.
|
|
||||||
* **Multi-language support:** KCL natively supports Java and enables multiple non-Java programming languages through MultiLangDaemon.
|
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
1. **Sign up for AWS** — Before you begin, you need an AWS account. For more information about creating an AWS account and retrieving your AWS credentials, see [AWS Account and Credentials][docs-signup] in the AWS SDK for Java Developer Guide.
|
1. **Sign up for AWS** — Before you begin, you need an AWS account. For more information about creating an AWS account and retrieving your AWS credentials, see [AWS Account and Credentials][docs-signup] in the AWS SDK for Java Developer Guide.
|
||||||
2. **Sign up for Amazon Kinesis** — Go to the Amazon Kinesis console to sign up for the service and create an Amazon Kinesis stream. For more information, see [Create an Amazon Kinesis Stream][kinesis-guide-create] in the Amazon Kinesis Developer Guide.
|
1. **Sign up for Amazon Kinesis** — Go to the Amazon Kinesis console to sign up for the service and create an Amazon Kinesis stream. For more information, see [Create an Amazon Kinesis Stream][kinesis-guide-create] in the Amazon Kinesis Developer Guide.
|
||||||
3. **Minimum requirements** — To use the Amazon Kinesis Client Library, you will need **Java 1.8+**. For more information about Amazon Kinesis Client Library requirements, see [Before You Begin][kinesis-guide-begin] in the Amazon Kinesis Developer Guide.
|
1. **Minimum requirements** — To use the Amazon Kinesis Client Library, you'll need **Java 1.8+**. For more information about Amazon Kinesis Client Library requirements, see [Before You Begin][kinesis-guide-begin] in the Amazon Kinesis Developer Guide.
|
||||||
4. **Using the Amazon Kinesis Client Library** — The best way to get familiar with the Amazon Kinesis Client Library is to read [Use Kinesis Client Library][kinesis-guide-applications] in the Amazon Kinesis Data Streams Developer Guide. For more information on core KCL concepts, please refer to the [KCL Concepts][kinesis-client-library-concepts] page.
|
1. **Using the Amazon Kinesis Client Library** — The best way to get familiar with the Amazon Kinesis Client Library is to read [Developing Record Consumer Applications][kinesis-guide-applications] in the Amazon Kinesis Developer Guide.
|
||||||
|
|
||||||
## Building from Source
|
## Building from Source
|
||||||
|
|
||||||
After you have downloaded the code from GitHub, you can build it using Maven. To disable GPG signing in the build, use
|
After you've downloaded the code from GitHub, you can build it using Maven. To disable GPG signing in the build, use this command: `mvn clean install -Dgpg.skip=true`
|
||||||
this command: `mvn clean install -Dgpg.skip=true`.
|
|
||||||
Note: This command does not run integration tests.
|
|
||||||
|
|
||||||
To disable running unit tests in the build, add the property `-Dskip.ut=true`.
|
|
||||||
|
|
||||||
## Running Integration Tests
|
|
||||||
|
|
||||||
Note that running integration tests creates AWS resources.
|
|
||||||
Integration tests require valid AWS credentials.
|
|
||||||
This will look for a default AWS profile specified in your local `.aws/credentials`.
|
|
||||||
To run all integration tests: `mvn verify -DskipITs=false`.
|
|
||||||
To run one integration test, specify the integration test class: `mvn -Dit.test="BasicStreamConsumerIntegrationTest" -DskipITs=false verify`
|
|
||||||
Optionally, you can provide the name of an IAM user/role to run tests with as a string using this command: `mvn -DskipITs=false -DawsProfile="<PROFILE_NAME>" verify`.
|
|
||||||
|
|
||||||
## Integration with the Kinesis Producer Library
|
## Integration with the Kinesis Producer Library
|
||||||
For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**, the KCL integrates without additional effort. When the KCL retrieves an aggregated Amazon Kinesis record consisting of multiple KPL user records, it will automatically invoke the KPL to extract the individual user records before returning them to the user.
|
For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**, the KCL integrates without additional effort. When the KCL retrieves an aggregated Amazon Kinesis record consisting of multiple KPL user records, it will automatically invoke the KPL to extract the individual user records before returning them to the user.
|
||||||
|
|
@ -61,71 +40,139 @@ For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesi
|
||||||
## Amazon KCL support for other languages
|
## Amazon KCL support for other languages
|
||||||
To make it easier for developers to write record processors in other languages, we have implemented a Java based daemon, called MultiLangDaemon that does all the heavy lifting. Our approach has the daemon spawn a sub-process, which in turn runs the record processor, which can be written in any language. The MultiLangDaemon process and the record processor sub-process communicate with each other over [STDIN and STDOUT using a defined protocol][multi-lang-protocol]. There will be a one to one correspondence amongst record processors, child processes, and shards. For Python developers specifically, we have abstracted these implementation details away and [expose an interface][kclpy] that enables you to focus on writing record processing logic in Python. This approach enables KCL to be language agnostic, while providing identical features and similar parallel processing model across all languages.
|
To make it easier for developers to write record processors in other languages, we have implemented a Java based daemon, called MultiLangDaemon that does all the heavy lifting. Our approach has the daemon spawn a sub-process, which in turn runs the record processor, which can be written in any language. The MultiLangDaemon process and the record processor sub-process communicate with each other over [STDIN and STDOUT using a defined protocol][multi-lang-protocol]. There will be a one to one correspondence amongst record processors, child processes, and shards. For Python developers specifically, we have abstracted these implementation details away and [expose an interface][kclpy] that enables you to focus on writing record processing logic in Python. This approach enables KCL to be language agnostic, while providing identical features and similar parallel processing model across all languages.
|
||||||
|
|
||||||
## Using the KCL
|
## Release Notes
|
||||||
The recommended way to use the KCL for Java is to consume it from Maven.
|
|
||||||
|
|
||||||
## KCL versions
|
### Latest Release (1.15.2 - Aug 14, 2024)
|
||||||
|
* [#1371](https://github.com/awslabs/amazon-kinesis-client/pull/1371) Fix a bug in debug and trace logging levels for worker
|
||||||
|
* [#1224](https://github.com/awslabs/amazon-kinesis-client/pull/1224) Modify RecordProcessorCheckpointer#advancePosition Metrics usage to ensure proper closure
|
||||||
|
* [#1345](https://github.com/awslabs/amazon-kinesis-client/pull/1345) Generate wrappers from proto files instead of shipping them directly
|
||||||
|
* [#1346](https://github.com/awslabs/amazon-kinesis-client/pull/1346) Upgrade com.google.protobuf:protobuf-java from 3.23.4 to 4.27.1
|
||||||
|
* [#1338](https://github.com/awslabs/amazon-kinesis-client/pull/1338) Upgrade org.apache.logging.log4j:log4j-api from 2.20.0 to 2.23.1
|
||||||
|
* [#1327](https://github.com/awslabs/amazon-kinesis-client/pull/1327) Upgrade com.google.guava:guava from 33.0.0-jre to 33.2.0-jre
|
||||||
|
* [#1283](https://github.com/awslabs/amazon-kinesis-client/pull/1283) Upgrade com.fasterxml.jackson.core:jackson-core from 2.15.2 to 2.17.0
|
||||||
|
* [#1284](https://github.com/awslabs/amazon-kinesis-client/pull/1284) Upgrade aws-java-sdk.version from 1.12.647 to 1.12.681
|
||||||
|
* [#1288](https://github.com/awslabs/amazon-kinesis-client/pull/1288) Upgrade commons-logging:commons-logging from 1.2 to 1.3.1
|
||||||
|
* [#1289](https://github.com/awslabs/amazon-kinesis-client/pull/1289) Upgrade org.projectlombok:lombok from 1.18.22 to 1.18.32
|
||||||
|
* [#1248](https://github.com/awslabs/amazon-kinesis-client/pull/1248) Upgrade org.apache.maven.plugins:maven-surefire-plugin from 2.22.2 to 3.2.5
|
||||||
|
* [#1234](https://github.com/awslabs/amazon-kinesis-client/pull/1234) Upgrade org.apache.maven.plugins:maven-javadoc-plugin from 3.4.1 to 3.6.3
|
||||||
|
* [#1137](https://github.com/awslabs/amazon-kinesis-client/pull/1137) Upgrade maven-failsafe-plugin from 2.22.2 to 3.1.2
|
||||||
|
* [#1134](https://github.com/awslabs/amazon-kinesis-client/pull/1134) Upgrade jackson-core from 2.15.0 to 2.15.2
|
||||||
|
* [#1119](https://github.com/awslabs/amazon-kinesis-client/pull/1119) Upgrade maven-source-plugin from 3.2.1 to 3.3.0
|
||||||
|
* [#1165](https://github.com/awslabs/amazon-kinesis-client/pull/1165) Upgrade protobuf-java from 3.19.6 to 3.23.4
|
||||||
|
|
||||||
> [!WARNING]
|
### Release (1.15.1 - Feb 5, 2023)
|
||||||
> ### Do not use AWS SDK for Java versions 2.27.19 to 2.27.23 with KCL 3.x
|
* [#1214](https://github.com/awslabs/amazon-kinesis-client/pull/1214) Added backoff logic for ShardSyncTaskIntegrationTest
|
||||||
> When using KCL 3.x with AWS SDK for Java versions 2.27.19 through 2.27.23, you may encounter the following DynamoDB exception:
|
* [#1214](https://github.com/awslabs/amazon-kinesis-client/pull/1214) Upgrade Guava version from 31.0.1 to 32.1.1
|
||||||
> ```software.amazon.awssdk.services.dynamodb.model.DynamoDbException: The document path provided in the update expression is invalid for update (Service: DynamoDb, Status Code: 400, Request ID: xxx)```.
|
* [#1252](https://github.com/awslabs/amazon-kinesis-client/pull/1252) Upgrade aws-java-sdk from 1.12.406 to 1.12.647
|
||||||
> This error occurs due to [a known issue](https://github.com/aws/aws-sdk-java-v2/issues/5584) in the AWS SDK for Java that affects the DynamoDB metadata table managed by KCL 3.x. The issue was introduced in version 2.27.19 and impacts all versions up to 2.27.23. The issue has been resolved in the AWS SDK for Java version 2.27.24. For optimal performance and stability, we recommend upgrading to version 2.28.0 or later.
|
|
||||||
|
|
||||||
### Version 3.x
|
### Release (1.15.0 - Jun 8, 2023)
|
||||||
``` xml
|
* **[#1108](https://github.com/awslabs/amazon-kinesis-client/pull/1108) Add support for Stream ARNs**
|
||||||
<dependency>
|
* [#1111](https://github.com/awslabs/amazon-kinesis-client/pull/1111) More consistent testing behavior with HashRangesAreAlwaysComplete
|
||||||
<groupId>software.amazon.kinesis</groupId>
|
* [#1054](https://github.com/awslabs/amazon-kinesis-client/pull/1054) Upgrade log4j-core from 2.17.1 to 2.20.0
|
||||||
<artifactId>amazon-kinesis-client</artifactId>
|
* [#1103](https://github.com/awslabs/amazon-kinesis-client/pull/1103) Upgrade jackson-core from 2.13.0 to 2.15.0
|
||||||
<version>3.0.3</version>
|
* [#943](https://github.com/awslabs/amazon-kinesis-client/pull/943) Upgrade nexus-staging-maven-plugin from 1.6.8 to 1.6.13
|
||||||
</dependency>
|
* [#1044](https://github.com/awslabs/amazon-kinesis-client/pull/1044) Upgrade aws-java-sdk.version from 1.12.406 to 1.12.408
|
||||||
```
|
* [#1055](https://github.com/awslabs/amazon-kinesis-client/pull/1055) Upgrade maven-compiler-plugin from 3.10.0 to 3.11.0
|
||||||
|
|
||||||
### Version 2.x
|
### Release (1.14.10 - Feb 15, 2023)
|
||||||
[Version 2.x tracking branch](https://github.com/awslabs/amazon-kinesis-client/tree/v2.x)
|
* Updated aws-java-sdk from 1.12.130 to 1.12.406
|
||||||
``` xml
|
* Updated com.google.protobuf from 3.19.4 to 3.19.6
|
||||||
<dependency>
|
* [Issue #1026](https://github.com/awslabs/amazon-kinesis-client/issues/1026)
|
||||||
<groupId>software.amazon.kinesis</groupId>
|
* [PR #1042](https://github.com/awslabs/amazon-kinesis-client/pull/1042)
|
||||||
<artifactId>amazon-kinesis-client</artifactId>
|
|
||||||
<version>2.7.0</version>
|
|
||||||
</dependency>
|
|
||||||
```
|
|
||||||
### Version 1.x
|
|
||||||
[Version 1.x tracking branch](https://github.com/awslabs/amazon-kinesis-client/tree/v1.x)
|
|
||||||
``` xml
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.amazonaws</groupId>
|
|
||||||
<artifactId>amazon-kinesis-client</artifactId>
|
|
||||||
<version>1.14.1</version>
|
|
||||||
</dependency>
|
|
||||||
```
|
|
||||||
### Release Notes
|
|
||||||
|
|
||||||
| KCL Version | Changelog |
|
### Release (1.14.9 - Dec 14, 2022)
|
||||||
| --- | --- |
|
* [#995](https://github.com/awslabs/amazon-kinesis-client/commit/372f98b21a91487e36612d528c56765a44b0aa86) Every other change for DynamoDBStreamsKinesis Adapter Compatibility
|
||||||
| 3.x | [master/CHANGELOG.md](CHANGELOG.md) |
|
* [#970](https://github.com/awslabs/amazon-kinesis-client/commit/251b331a2e0fd912b50f8b5a12d088bf0b3263b9) PeriodicShardSyncManager Changes Needed for DynamoDBStreamsKinesisAdapter
|
||||||
| 2.x | [v2.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v2.x/CHANGELOG.md) |
|
|
||||||
| 1.x | [v1.x/CHANGELOG.md](https://github.com/awslabs/amazon-kinesis-client/blob/v1.x/CHANGELOG.md) |
|
|
||||||
|
|
||||||
### Version recommendation
|
### Release (1.14.8 - Feb 24, 2022)
|
||||||
We recommend that all users migrate to the latest respective versions to avoid known issues and benefit from all improvements.
|
* [Bump log4j-core from 2.17.0 to 2.17.1](https://github.com/awslabs/amazon-kinesis-client/commit/94b138a9d9a502ee0f4f000bb0efd2766ebadc37)
|
||||||
|
* [Bump protobuf-java from 3.19.1 to 3.19.4](https://github.com/awslabs/amazon-kinesis-client/commit/a809b12c43c57a3d6ad3827feb60e4322614259c)
|
||||||
|
* [Bump maven-compiler-plugin from 3.8.1 to 3.10.0](https://github.com/awslabs/amazon-kinesis-client/commit/37b5d7b9a1ccad483469ef542a6a7237462b14f2)
|
||||||
|
|
||||||
## Giving Feedback
|
### Release (1.14.7 - Dec 22, 2021)
|
||||||
|
* [#881](https://github.com/awslabs/amazon-kinesis-client/pull/881) Update log4j test dependency from 2.16.0 to 2.17.0 and some other dependencies
|
||||||
|
|
||||||
Help Us Improve the Kinesis Client Library! Your involvement is crucial to enhancing the Kinesis Client Library. We invite you to join our community and contribute in the following ways:
|
### Release (1.14.6 - Dec 15, 2021)
|
||||||
|
* [#876](https://github.com/awslabs/amazon-kinesis-client/pull/876) Update log4j test dependency from 2.15.0 to 2.16.0
|
||||||
|
|
||||||
* [Issue](https://github.com/awslabs/amazon-kinesis-client/issues) Reporting: This is our preferred method of communication. Use this channel to report bugs, suggest improvements, or ask questions.
|
### Release (1.14.5 - Dec 10, 2021)
|
||||||
* Feature Requests: Share your ideas for new features or vote for existing proposals on our [Issues](https://github.com/awslabs/amazon-kinesis-client/issues) page. This helps us prioritize development efforts.
|
* [#872](https://github.com/awslabs/amazon-kinesis-client/pull/872) Update log4j test dependency from 1.2.17 to 2.15.0
|
||||||
* Participate in Discussions: Engage with other users and our team in our discussion forums.
|
* [#873](https://github.com/awslabs/amazon-kinesis-client/pull/873) Upgrading version of AWS Java SDK to 1.12.128
|
||||||
* Submit [Pull Requests](https://github.com/awslabs/amazon-kinesis-client/pulls): If you have developed a fix or improvement, we welcome your code contributions.
|
|
||||||
|
|
||||||
By participating through these channels, you play a vital role in shaping the future of the Kinesis Client Library. We value your input and look forward to collaborating with you!
|
### Release (1.14.4 - June 14, 2021)
|
||||||
|
* [Milestone#61](https://github.com/awslabs/amazon-kinesis-client/milestone/61)
|
||||||
|
* [#816](https://github.com/awslabs/amazon-kinesis-client/pull/816) Updated the Worker shutdown logic to make sure that the `LeaseCleanupManager` also terminates all the threads that it has started.
|
||||||
|
* [#821](https://github.com/awslabs/amazon-kinesis-client/pull/821) Upgrading version of AWS Java SDK to 1.12.3
|
||||||
|
|
||||||
|
### Release (1.14.3 - May 3, 2021)
|
||||||
|
* [Milestone#60](https://github.com/awslabs/amazon-kinesis-client/milestone/60)
|
||||||
|
* [#811](https://github.com/awslabs/amazon-kinesis-client/pull/811) Fixing a bug in `KinesisProxy` that can lead to undetermined behavior during partial failures.
|
||||||
|
* [#811](https://github.com/awslabs/amazon-kinesis-client/pull/811) Adding guardrails to handle duplicate shards from the service.
|
||||||
|
|
||||||
|
## Release (1.14.2 - February 24, 2021)
|
||||||
|
* [Milestone#57](https://github.com/awslabs/amazon-kinesis-client/milestone/57)
|
||||||
|
* [#790](https://github.com/awslabs/amazon-kinesis-client/pull/790) Fixing a bug that caused paginated `ListShards` calls with the `ShardFilter` parameter to fail when the lease table was being initialized.
|
||||||
|
|
||||||
|
### Release (1.14.1 - January 27, 2021)
|
||||||
|
* [Milestone#56](https://github.com/awslabs/amazon-kinesis-client/milestone/56)
|
||||||
|
|
||||||
|
* Fix for cross DDB table interference when multiple KCL applications are run in same JVM.
|
||||||
|
* Fix and guards to avoid potential checkpoint rewind during shard end, which may block children shard processing.
|
||||||
|
* Fix for thread cycle wastage on InitializeTask for deleted shard.
|
||||||
|
* Improved logging in LeaseCleanupManager that would indicate why certain shards are not cleaned up from the lease table.
|
||||||
|
|
||||||
|
### Release (1.14.0 - August 17, 2020)
|
||||||
|
|
||||||
|
* [Milestone#50](https://github.com/awslabs/amazon-kinesis-client/milestone/50)
|
||||||
|
|
||||||
|
* Behavior of shard synchronization is moving from each worker independently learning about all existing shards to workers only discovering the children of shards that each worker owns. This optimizes memory usage, lease table IOPS usage, and number of calls made to kinesis for streams with high shard counts and/or frequent resharding.
|
||||||
|
* When bootstrapping an empty lease table, KCL utilizes the ListShard API's filtering option (the ShardFilter optional request parameter) to retrieve and create leases only for a snapshot of shards open at the time specified by the ShardFilter parameter. The ShardFilter parameter enables you to filter out the response of the ListShards API, using the Type parameter. KCL uses the Type filter parameter and the following of its valid values to identify and return a snapshot of open shards that might require new leases.
|
||||||
|
* Currently, the following shard filters are supported:
|
||||||
|
* `AT_TRIM_HORIZON` - the response includes all the shards that were open at `TRIM_HORIZON`.
|
||||||
|
* `AT_LATEST` - the response includes only the currently open shards of the data stream.
|
||||||
|
* `AT_TIMESTAMP` - the response includes all shards whose start timestamp is less than or equal to the given timestamp and end timestamp is greater than or equal to the given timestamp or still open.
|
||||||
|
* `ShardFilter` is used when creating leases for an empty lease table to initialize leases for a snapshot of shards specified at `KinesisClientLibConfiguration#initialPositionInStreamExtended`.
|
||||||
|
* For more information about ShardFilter, see the [official AWS documentation on ShardFilter](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ShardFilter.html).
|
||||||
|
|
||||||
|
* Introducing support for the `ChildShards` response of the `GetRecords` API to perform lease/shard synchronization that happens at `SHARD_END` for closed shards, allowing a KCL worker to only create leases for the child shards of the shard it finished processing.
|
||||||
|
* For KCL 1.x applications, this uses the `ChildShards` response of the `GetRecords` API.
|
||||||
|
* For more information, see the official AWS Documentation on [GetRecords](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) and [ChildShard](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ChildShard.html).
|
||||||
|
|
||||||
|
* KCL now also performs additional periodic shard/lease scans in order to identify any potential holes in the lease table to ensure the complete hash range of the stream is being processed and create leases for them if required. When `KinesisClientLibConfiguration#shardSyncStrategyType` is set to `ShardSyncStrategyType.SHARD_END`, `PeriodicShardSyncManager#leasesRecoveryAuditorInconsistencyConfidenceThreshold` will be used to determine the threshold for number of consecutive scans containing holes in the lease table after which to enforce a shard sync. When `KinesisClientLibConfiguration#shardSyncStrategyType` is set to `ShardSyncStrategyType.PERIODIC`, `leasesRecoveryAuditorInconsistencyConfidenceThreshold` is ignored.
|
||||||
|
* New configuration options are available to configure `PeriodicShardSyncManager` in `KinesisClientLibConfiguration`
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ----------------------------------------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| leasesRecoveryAuditorInconsistencyConfidenceThreshold | 3 | Confidence threshold for the periodic auditor job to determine if leases for a stream in the lease table is inconsistent. If the auditor finds same set of inconsistencies consecutively for a stream for this many times, then it would trigger a shard sync. Only used for `ShardSyncStrategyType.SHARD_END`. |
|
||||||
|
|
||||||
|
* New CloudWatch metrics are also now emitted to monitor the health of `PeriodicShardSyncManager`:
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --------------------------- | ------------------------------------------------------ |
|
||||||
|
| NumStreamsWithPartialLeases | Number of streams that had holes in their hash ranges. |
|
||||||
|
| NumStreamsToSync | Number of streams which underwent a full shard sync. |
|
||||||
|
|
||||||
|
* Introducing deferred lease cleanup. Leases will be deleted asynchronously by `LeaseCleanupManager` upon reaching `SHARD_END`, when a shard has either expired past the stream’s retention period or been closed as the result of a resharding operation.
|
||||||
|
* New configuration options are available to configure `LeaseCleanupManager`.
|
||||||
|
|
||||||
|
| Name | Default | Description |
|
||||||
|
| ----------------------------------- | ---------- | --------------------------------------------------------------------------------------------------------- |
|
||||||
|
| leaseCleanupIntervalMillis | 1 minute | Interval at which to run lease cleanup thread. |
|
||||||
|
| completedLeaseCleanupIntervalMillis | 5 minutes | Interval at which to check if a lease is completed or not. |
|
||||||
|
| garbageLeaseCleanupIntervalMillis | 30 minutes | Interval at which to check if a lease is garbage (i.e trimmed past the stream's retention period) or not. |
|
||||||
|
|
||||||
|
* Including an optimization to `KinesisShardSyncer` to only create leases for one layer of shards.
|
||||||
|
* Changing default shard prioritization strategy to be `NoOpShardPrioritization` to allow prioritization of completed shards. Customers who are upgrading to this version and are reading from `TRIM_HORIZON` should continue using `ParentsFirstShardPrioritization` while upgrading.
|
||||||
|
* Upgrading version of AWS SDK to 1.11.844.
|
||||||
|
* [#719](https://github.com/awslabs/amazon-kinesis-client/pull/719) Upgrading version of Google Protobuf to 3.11.4.
|
||||||
|
* [#712](https://github.com/awslabs/amazon-kinesis-client/pull/712) Allowing KCL to consider lease tables in `UPDATING` healthy.
|
||||||
|
|
||||||
|
###### For remaining release notes check **[CHANGELOG.md][changelog-md]**.
|
||||||
|
|
||||||
[docs-signup]: http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-setup.html
|
|
||||||
[kcl-javadoc]: https://javadoc.io/doc/software.amazon.kinesis/amazon-kinesis-client/
|
|
||||||
[kinesis]: http://aws.amazon.com/kinesis
|
[kinesis]: http://aws.amazon.com/kinesis
|
||||||
[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues
|
|
||||||
[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169
|
[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169
|
||||||
|
[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues
|
||||||
|
[docs-signup]: http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-setup.html
|
||||||
[kinesis-guide]: http://docs.aws.amazon.com/kinesis/latest/dev/introduction.html
|
[kinesis-guide]: http://docs.aws.amazon.com/kinesis/latest/dev/introduction.html
|
||||||
[kinesis-guide-begin]: http://docs.aws.amazon.com/kinesis/latest/dev/before-you-begin.html
|
[kinesis-guide-begin]: http://docs.aws.amazon.com/kinesis/latest/dev/before-you-begin.html
|
||||||
[kinesis-guide-create]: http://docs.aws.amazon.com/kinesis/latest/dev/step-one-create-stream.html
|
[kinesis-guide-create]: http://docs.aws.amazon.com/kinesis/latest/dev/step-one-create-stream.html
|
||||||
|
|
@ -134,8 +181,5 @@ By participating through these channels, you play a vital role in shaping the fu
|
||||||
[kinesis-guide-kpl]: http://docs.aws.amazon.com//kinesis/latest/dev/developing-producers-with-kpl.html
|
[kinesis-guide-kpl]: http://docs.aws.amazon.com//kinesis/latest/dev/developing-producers-with-kpl.html
|
||||||
[kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html
|
[kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html
|
||||||
[kclpy]: https://github.com/awslabs/amazon-kinesis-client-python
|
[kclpy]: https://github.com/awslabs/amazon-kinesis-client-python
|
||||||
[multi-lang-protocol]: /amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java
|
[multi-lang-protocol]: https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java
|
||||||
[migration-guide]: https://docs.aws.amazon.com/streams/latest/dev/kcl-migration-from-previous-versions
|
[changelog-md]: https://github.com/awslabs/amazon-kinesis-client/blob/master/CHANGELOG.md
|
||||||
[kcl-sample]: https://docs.aws.amazon.com/streams/latest/dev/kcl-example-code
|
|
||||||
[kcl-aws-doc]: https://docs.aws.amazon.com/streams/latest/dev/kcl.html
|
|
||||||
[giving-feedback]: https://github.com/awslabs/amazon-kinesis-client?tab=readme-ov-file#giving-feedback
|
|
||||||
|
|
|
||||||
|
|
@ -1,189 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<!--
|
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
-->
|
|
||||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
|
||||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
|
||||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|
||||||
<parent>
|
|
||||||
<artifactId>amazon-kinesis-client-pom</artifactId>
|
|
||||||
<groupId>software.amazon.kinesis</groupId>
|
|
||||||
<version>3.0.3</version>
|
|
||||||
</parent>
|
|
||||||
<modelVersion>4.0.0</modelVersion>
|
|
||||||
|
|
||||||
<artifactId>amazon-kinesis-client-multilang</artifactId>
|
|
||||||
|
|
||||||
<dependencies>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.kinesis</groupId>
|
|
||||||
<artifactId>amazon-kinesis-client</artifactId>
|
|
||||||
<version>${project.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>sts</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.projectlombok</groupId>
|
|
||||||
<artifactId>lombok</artifactId>
|
|
||||||
<version>1.18.28</version>
|
|
||||||
<scope>provided</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>ch.qos.logback</groupId>
|
|
||||||
<artifactId>logback-classic</artifactId>
|
|
||||||
<version>1.3.14</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.beust</groupId>
|
|
||||||
<artifactId>jcommander</artifactId>
|
|
||||||
<version>1.82</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>commons-io</groupId>
|
|
||||||
            <artifactId>commons-io</artifactId>
            <version>2.16.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-collections4</artifactId>
            <version>4.4</version>
        </dependency>

        <dependency>
            <groupId>commons-beanutils</groupId>
            <artifactId>commons-beanutils</artifactId>
            <version>1.9.4</version>
        </dependency>

        <!-- Test -->
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter-api</artifactId>
            <version>5.11.3</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.13.2</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-all</artifactId>
            <version>1.10.19</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-all</artifactId>
            <version>1.3</version>
            <scope>test</scope>
        </dependency>
        <!-- Using older version to be compatible with Java 8 -->
        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-junit-jupiter</artifactId>
            <version>3.12.4</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <pluginManagement>
            <plugins>
                <plugin>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-compiler-plugin</artifactId>
                    <version>3.13.0</version>
                    <configuration>
                        <release>8</release>
                        <encoding>UTF-8</encoding>
                    </configuration>
                </plugin>
            </plugins>
        </pluginManagement>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-javadoc-plugin</artifactId>
                <version>3.7.0</version>
                <executions>
                    <execution>
                        <id>attach-javadocs</id>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-source-plugin</artifactId>
                <version>3.2.1</version>
                <executions>
                    <execution>
                        <id>attach-sources</id>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>com.diffplug.spotless</groupId>
                <artifactId>spotless-maven-plugin</artifactId>
                <version>2.30.0</version> <!-- last version to support java 8 -->
                <configuration>
                    <java>
                        <palantirJavaFormat />
                        <importOrder>
                            <order>java,,\#</order>
                        </importOrder>
                    </java>
                </configuration>
                <executions>
                    <execution>
                        <goals>
                            <goal>check</goal>
                        </goals>
                        <phase>compile</phase>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <profiles>
        <profile>
            <id>disable-java8-doclint</id>
            <activation>
                <jdk>[1.8,)</jdk>
            </activation>
            <properties>
                <doclint>none</doclint>
            </properties>
        </profile>
    </profiles>

</project>
File diff suppressed because it is too large
@@ -1,235 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.joran.JoranConfigurator;
import ch.qos.logback.core.joran.spi.JoranException;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import lombok.Data;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.LoggerFactory;
import software.amazon.kinesis.coordinator.Scheduler;

/**
 * Main app that launches the scheduler that runs the multi-language record processor.
 *
 * Requires a properties file containing configuration for this daemon and the KCL. A properties file should at minimum
 * define these properties:
 *
 * <pre>
 * # The script that abides by the multi-language protocol. This script will
 * # be executed by the MultiLangDaemon, which will communicate with this script
 * # over STDIN and STDOUT according to the multi-language protocol.
 * executableName = sampleapp.py
 *
 * # The name of an Amazon Kinesis stream to process.
 * streamName = words
 *
 * # Used by the KCL as the name of this application. Will be used as the name
 * # of an Amazon DynamoDB table which will store the lease and checkpoint
 * # information for workers with this application name.
 * applicationName = PythonKCLSample
 *
 * # Users can change the credentials provider the KCL will use to retrieve credentials.
 * # The DefaultCredentialsProvider checks several other providers, which is
 * # described here:
 * # https://sdk.amazonaws.com/java/api/2.0.0-preview-11/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.html
 * AwsCredentialsProvider = DefaultCredentialsProvider
 * </pre>
 */
@Slf4j
public class MultiLangDaemon {
    static class MultiLangDaemonArguments {
        @Parameter
        List<String> parameters = new ArrayList<>();

        @Parameter(
                names = {"-p", "--properties-file"},
                description = "Properties file to be used with the KCL")
        String propertiesFile;

        @Parameter(
                names = {"-l", "--log-configuration"},
                description = "File location of logback.xml to override the default")
        String logConfiguration;
    }

    @Data
    @Accessors(fluent = true)
    static class MultiLangRunner implements Callable<Integer> {
        private final Scheduler scheduler;

        @Override
        public Integer call() throws Exception {
            int exitCode = 0;
            try {
                scheduler().run();
            } catch (Throwable t) {
                log.error("Caught throwable while processing data", t);
                exitCode = 1;
            }
            return exitCode;
        }
    }

    JCommander buildJCommanderAndParseArgs(final MultiLangDaemonArguments arguments, final String[] args) {
        JCommander jCommander = JCommander.newBuilder()
                .programName("amazon-kinesis-client MultiLangDaemon")
                .addObject(arguments)
                .build();
        jCommander.parse(args);
        return jCommander;
    }

    void printUsage(final JCommander jCommander, final String message) {
        if (StringUtils.isNotEmpty(message)) {
            System.err.println(message);
        }
        jCommander.usage();
    }

    Scheduler buildScheduler(final MultiLangDaemonConfig config) {
        return config.getMultiLangDaemonConfiguration().build(config.getRecordProcessorFactory());
    }

    void configureLogging(final String logConfiguration) {
        if (StringUtils.isNotEmpty(logConfiguration)) {
            LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
            JoranConfigurator configurator = new JoranConfigurator();
            configureLogging(logConfiguration, loggerContext, configurator);
        }
    }

    void configureLogging(
            final String logConfiguration, final LoggerContext loggerContext, final JoranConfigurator configurator) {
        loggerContext.reset();
        try (InputStream inputStream = FileUtils.openInputStream(new File(logConfiguration))) {
            configurator.setContext(loggerContext);
            configurator.doConfigure(inputStream);
        } catch (IOException | JoranException e) {
            throw new RuntimeException("Error while loading log configuration: " + e.getMessage());
        }
    }

    String validateAndGetPropertiesFileName(final MultiLangDaemonArguments arguments) {
        String propertiesFile = "";

        if (CollectionUtils.isNotEmpty(arguments.parameters)) {
            if (arguments.parameters.size() == 1) {
                propertiesFile = arguments.parameters.get(0);
            } else {
                throw new RuntimeException("Expected a single argument, but found multiple arguments. Arguments: "
                        + String.join(", ", arguments.parameters));
            }
        }

        if (StringUtils.isNotEmpty(arguments.propertiesFile)) {
            if (StringUtils.isNotEmpty(propertiesFile)) {
                log.warn("Overriding the properties file with the --properties-file option");
            }
            propertiesFile = arguments.propertiesFile;
        }

        if (StringUtils.isEmpty(propertiesFile)) {
            throw new RuntimeException("Properties file missing, please provide a properties file");
        }

        return propertiesFile;
    }

    MultiLangDaemonConfig buildMultiLangDaemonConfig(final String propertiesFile) {
        try {
            return new MultiLangDaemonConfig(propertiesFile);
        } catch (IOException e) {
            throw new RuntimeException("Error while reading properties file: " + e.getMessage());
        }
    }

    void setupShutdownHook(final Runtime runtime, final MultiLangRunner runner, final MultiLangDaemonConfig config) {
        long shutdownGraceMillis = config.getMultiLangDaemonConfiguration().getShutdownGraceMillis();
        runtime.addShutdownHook(new Thread(() -> {
            log.info("Process terminated, will initiate shutdown.");
            try {
                Future<Boolean> runnerFuture = runner.scheduler().startGracefulShutdown();
                runnerFuture.get(shutdownGraceMillis, TimeUnit.MILLISECONDS);
                log.info("Process shutdown is complete.");
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                log.error("Encountered an error during shutdown.", e);
            }
        }));
    }

    int submitRunnerAndWait(final MultiLangDaemonConfig config, final MultiLangRunner runner) {
        ExecutorService executorService = config.getExecutorService();
        Future<Integer> future = executorService.submit(runner);

        try {
            return future.get();
        } catch (InterruptedException | ExecutionException e) {
            log.error("Encountered an error while running daemon", e);
        }
        return 1;
    }

    void exit(final int exitCode) {
        System.exit(exitCode);
    }

    /**
     * @param args
     *            Accepts a single argument, that argument is a properties file which provides KCL configuration as
     *            well as the name of an executable.
     */
    public static void main(final String[] args) {
        int exitCode = 1;
        MultiLangDaemon daemon = new MultiLangDaemon();
        MultiLangDaemonArguments arguments = new MultiLangDaemonArguments();
        JCommander jCommander = daemon.buildJCommanderAndParseArgs(arguments, args);
        try {
            String propertiesFileName = daemon.validateAndGetPropertiesFileName(arguments);
            daemon.configureLogging(arguments.logConfiguration);
            MultiLangDaemonConfig config = daemon.buildMultiLangDaemonConfig(propertiesFileName);

            Scheduler scheduler = daemon.buildScheduler(config);
            MultiLangRunner runner = new MultiLangRunner(scheduler);

            daemon.setupShutdownHook(Runtime.getRuntime(), runner, config);
            exitCode = daemon.submitRunnerAndWait(config, runner);
        } catch (Throwable t) {
            t.printStackTrace(System.err);
            daemon.printUsage(jCommander, t.getMessage());
            System.err.println("For more information, visit: https://github.com/awslabs/amazon-kinesis-client");
        }
        daemon.exit(exitCode);
    }
}
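The class above is normally started through main(), but its package-private helpers make the wiring visible. The following is a minimal sketch of that launch sequence, assuming code in the same software.amazon.kinesis.multilang package and a hypothetical "app.properties" file that defines at least executableName; it is not the authoritative entry point.

// Sketch only: mirrors what main(...) does, with a hypothetical properties file.
MultiLangDaemon daemon = new MultiLangDaemon();
MultiLangDaemonConfig config = daemon.buildMultiLangDaemonConfig("app.properties");
Scheduler scheduler = daemon.buildScheduler(config);
MultiLangDaemon.MultiLangRunner runner = new MultiLangDaemon.MultiLangRunner(scheduler);
daemon.setupShutdownHook(Runtime.getRuntime(), runner, config);
// Blocks until the scheduler exits; a non-zero value signals a failure.
int exitCode = daemon.submitRunnerAndWait(config, runner);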
@@ -1,224 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;
import software.amazon.kinesis.retrieval.RetrievalConfig;

/**
 * This class captures the configuration needed to run the MultiLangDaemon.
 */
@Slf4j
public class MultiLangDaemonConfig {
    private static final String USER_AGENT = "amazon-kinesis-multi-lang-daemon";
    private static final String VERSION = "1.0.1";

    private static final String PROP_EXECUTABLE_NAME = "executableName";
    private static final String PROP_PROCESSING_LANGUAGE = "processingLanguage";
    private static final String PROP_MAX_ACTIVE_THREADS = "maxActiveThreads";

    private final MultiLangDaemonConfiguration multiLangDaemonConfiguration;

    private final ExecutorService executorService;

    private final MultiLangRecordProcessorFactory recordProcessorFactory;

    /**
     * Constructor.
     *
     * @param propertiesFile
     *            The location of the properties file.
     * @throws IOException
     *             Thrown when the properties file can't be accessed.
     * @throws IllegalArgumentException
     *             Thrown when the contents of the properties file are not as expected.
     */
    public MultiLangDaemonConfig(String propertiesFile) throws IOException, IllegalArgumentException {
        this(propertiesFile, Thread.currentThread().getContextClassLoader());
    }

    /**
     *
     * @param propertiesFile
     *            The location of the properties file.
     * @param classLoader
     *            A classloader, useful if trying to programmatically configure with the daemon, such as in a unit test.
     * @throws IOException
     *             Thrown when the properties file can't be accessed.
     * @throws IllegalArgumentException
     *             Thrown when the contents of the properties file are not as expected.
     */
    public MultiLangDaemonConfig(String propertiesFile, ClassLoader classLoader)
            throws IOException, IllegalArgumentException {
        this(propertiesFile, classLoader, new KinesisClientLibConfigurator());
    }

    /**
     *
     * @param propertiesFile
     *            The location of the properties file.
     * @param classLoader
     *            A classloader, useful if trying to programmatically configure with the daemon, such as in a unit test.
     * @param configurator
     *            A configurator to use.
     * @throws IOException
     *             Thrown when the properties file can't be accessed.
     * @throws IllegalArgumentException
     *             Thrown when the contents of the properties file are not as expected.
     */
    public MultiLangDaemonConfig(
            String propertiesFile, ClassLoader classLoader, KinesisClientLibConfigurator configurator)
            throws IOException, IllegalArgumentException {
        Properties properties = loadProperties(classLoader, propertiesFile);
        if (!validateProperties(properties)) {
            throw new IllegalArgumentException(
                    "Must provide an executable name in the properties file, " + "e.g. executableName = sampleapp.py");
        }

        String executableName = properties.getProperty(PROP_EXECUTABLE_NAME);
        String processingLanguage = properties.getProperty(PROP_PROCESSING_LANGUAGE);

        multiLangDaemonConfiguration = configurator.getConfiguration(properties);
        executorService = buildExecutorService(properties);
        recordProcessorFactory =
                new MultiLangRecordProcessorFactory(executableName, executorService, multiLangDaemonConfiguration);

        log.info(
                "Running {} to process stream {} with executable {}",
                multiLangDaemonConfiguration.getApplicationName(),
                multiLangDaemonConfiguration.getStreamName(),
                executableName);
        prepare(processingLanguage);
    }

    private void prepare(String processingLanguage) {
        // Ensure the JVM will refresh the cached IP values of AWS resources (e.g. service endpoints).
        java.security.Security.setProperty("networkaddress.cache.ttl", "60");

        log.info("Using workerId: {}", multiLangDaemonConfiguration.getWorkerIdentifier());

        StringBuilder userAgent = new StringBuilder(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT);
        userAgent.append(" ");
        userAgent.append(USER_AGENT);
        userAgent.append("/");
        userAgent.append(VERSION);

        if (processingLanguage != null) {
            userAgent.append(" ");
            userAgent.append(processingLanguage);
        }

        if (recordProcessorFactory.getCommandArray().length > 0) {
            userAgent.append(" ");
            userAgent.append(recordProcessorFactory.getCommandArray()[0]);
        }

        log.info("MultiLangDaemon is adding the following fields to the User Agent: {}", userAgent.toString());
        // multiLangDaemonConfiguration.withUserAgent(userAgent.toString());
    }

    private static Properties loadProperties(ClassLoader classLoader, String propertiesFileName) throws IOException {
        Properties properties = new Properties();
        InputStream propertyStream = null;
        try {
            propertyStream = classLoader.getResourceAsStream(propertiesFileName);
            if (propertyStream == null) {
                File propertyFile = new File(propertiesFileName);
                if (propertyFile.exists()) {
                    propertyStream = new FileInputStream(propertyFile);
                }
            }

            if (propertyStream == null) {
                throw new FileNotFoundException(
                        "Unable to find property file in classpath, or file system: '" + propertiesFileName + "'");
            }

            properties.load(propertyStream);
            return properties;
        } finally {
            if (propertyStream != null) {
                propertyStream.close();
            }
        }
    }

    private static boolean validateProperties(Properties properties) {
        return properties != null && properties.getProperty(PROP_EXECUTABLE_NAME) != null;
    }

    private static int getMaxActiveThreads(Properties properties) {
        return Integer.parseInt(properties.getProperty(PROP_MAX_ACTIVE_THREADS, "0"));
    }

    private static ExecutorService buildExecutorService(Properties properties) {
        int maxActiveThreads = getMaxActiveThreads(properties);
        ThreadFactoryBuilder builder = new ThreadFactoryBuilder().setNameFormat("multi-lang-daemon-%04d");
        log.debug("Value for {} property is {}", PROP_MAX_ACTIVE_THREADS, maxActiveThreads);
        if (maxActiveThreads <= 0) {
            log.info("Using a cached thread pool.");
            return new ThreadPoolExecutor(
                    0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(), builder.build());
        } else {
            log.info("Using a fixed thread pool with {} max active threads.", maxActiveThreads);
            return new ThreadPoolExecutor(
                    maxActiveThreads,
                    maxActiveThreads,
                    0L,
                    TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<>(),
                    builder.build());
        }
    }

    /**
     *
     * @return A MultiLangDaemonConfiguration object based on the properties file provided.
     */
    public MultiLangDaemonConfiguration getMultiLangDaemonConfiguration() {
        return multiLangDaemonConfiguration;
    }

    /**
     *
     * @return An executor service based on the properties file provided.
     */
    public ExecutorService getExecutorService() {
        return executorService;
    }

    /**
     *
     * @return A MultiLangRecordProcessorFactory based on the properties file provided.
     */
    public MultiLangRecordProcessorFactory getRecordProcessorFactory() {
        return recordProcessorFactory;
    }
}
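A brief sketch of how this configuration class is typically consumed, assuming a hypothetical "app.properties" that defines at least executableName (all other settings fall back to KCL defaults):

// Sketch only; "app.properties" is a hypothetical file on the classpath or file system.
MultiLangDaemonConfig config = new MultiLangDaemonConfig("app.properties");
// maxActiveThreads <= 0 (the default) yields a cached pool; a positive value yields a fixed pool of that size.
ExecutorService workers = config.getExecutorService();
MultiLangRecordProcessorFactory factory = config.getRecordProcessorFactory();
MultiLangDaemonConfiguration kclConfig = config.getMultiLangDaemonConfiguration();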
@@ -1,148 +0,0 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.google.common.base.CaseFormat;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.regions.Region;

/**
 * Key-Value pairs which may be nested in, and extracted from, a property value
 * in a Java properties file. For example, given the line in a property file of
 * {@code my_key = my_value|foo=bar} and a delimiter split on {@code |} (pipe),
 * the value {@code my_value|foo=bar} would have a nested key of {@code foo}
 * and its corresponding value is {@code bar}.
 * <br/><br/>
 * The order of nested properties does not matter, and these properties are optional.
 * Customers may choose to provide, in any order, zero-or-more nested properties.
 * <br/><br/>
 * Duplicate keys are not supported, and may result in a last-write-wins outcome.
 */
@Slf4j
public enum NestedPropertyKey {

    /**
     * Specify the service endpoint where requests will be submitted.
     * This property's value must be in the following format:
     * <pre>
     * ENDPOINT ::= SERVICE_ENDPOINT "^" SIGNING_REGION
     * SERVICE_ENDPOINT ::= URL
     * SIGNING_REGION ::= AWS_REGION
     * </pre>
     *
     * It would be redundant to provide both this and {@link #ENDPOINT_REGION}.
     *
     * @see #ENDPOINT_REGION
     * @see <a href="https://docs.aws.amazon.com/general/latest/gr/rande.html">AWS Service endpoints</a>
     * @see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions">Available Regions</a>
     */
    ENDPOINT {
        void visit(final NestedPropertyProcessor processor, final String endpoint) {
            final String[] tokens = endpoint.split("\\^");
            if (tokens.length != 2) {
                throw new IllegalArgumentException("Invalid " + name() + ": " + endpoint);
            }
            processor.acceptEndpoint(tokens[0], tokens[1]);
        }
    },

    /**
     * Specify the region where service requests will be submitted. This
     * region will determine both the service endpoint and signing region.
     * <br/><br/>
     * It would be redundant to provide both this and {@link #ENDPOINT}.
     *
     * @see #ENDPOINT
     * @see <a href="https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions">Available Regions</a>
     */
    ENDPOINT_REGION {
        void visit(final NestedPropertyProcessor processor, final String regionName) {
            List<Region> validRegions = Region.regions();
            Region region = Region.of(regionName);
            if (!validRegions.contains(region)) {
                throw new IllegalArgumentException("Invalid region name: " + regionName);
            }
            processor.acceptEndpointRegion(region);
        }
    },

    /**
     * External ids may be used when delegating access in a multi-tenant
     * environment, or to third parties.
     *
     * @see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html">
     *     How to use an external ID when granting access to your AWS resources to a third party</a>
     */
    EXTERNAL_ID {
        void visit(final NestedPropertyProcessor processor, final String externalId) {
            processor.acceptExternalId(externalId);
        }
    },
    ;

    /**
     * Nested key within the property value. For example, a nested key-value
     * of {@code foo=bar} has a nested key of {@code foo}.
     */
    @Getter(AccessLevel.PACKAGE)
    private final String nestedKey;

    NestedPropertyKey() {
        // convert the enum from UPPER_SNAKE_CASE to lowerCamelCase
        nestedKey = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name());
    }

    abstract void visit(NestedPropertyProcessor processor, String value);

    /**
     * Parses any number of parameters. Each nested property will prompt a
     * visit to the {@code processor}.
     *
     * @param processor processor to be invoked for every nested property
     * @param params parameters to check for a nested property key
     */
    public static void parse(final NestedPropertyProcessor processor, final String... params) {
        // Construct a disposable cache to keep this O(n). Since parsing is
        // usually one-and-done, it's wasteful to maintain this cache in perpetuity.
        final Map<String, NestedPropertyKey> cachedKeys = new HashMap<>();
        for (final NestedPropertyKey npk : values()) {
            cachedKeys.put(npk.getNestedKey(), npk);
        }

        for (final String param : params) {
            if (param != null) {
                final String[] tokens = param.split("=");
                if (tokens.length == 2) {
                    final NestedPropertyKey npk = cachedKeys.get(tokens[0]);
                    if (npk != null) {
                        npk.visit(processor, tokens[1]);
                    } else {
                        log.warn("Unsupported nested key: {}", param);
                    }
                } else if (tokens.length > 2) {
                    log.warn("Malformed nested key: {}", param);
                } else {
                    log.info("Parameter is not a nested key: {}", param);
                }
            }
        }
    }
}
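To make the key syntax concrete, here is a small illustrative sketch: the nested keys are simply the lowerCamelCase forms of the enum constants, and each recognized key triggers one callback on the supplied processor. The region and external id values below are placeholders.

// Sketch only; the processor just prints what it receives.
NestedPropertyProcessor processor = new NestedPropertyProcessor() {
    @Override
    public void acceptEndpoint(String serviceEndpoint, String signingRegion) {
        System.out.println("endpoint=" + serviceEndpoint + ", signingRegion=" + signingRegion);
    }

    @Override
    public void acceptEndpointRegion(Region region) {
        System.out.println("endpointRegion=" + region);
    }

    @Override
    public void acceptExternalId(String externalId) {
        System.out.println("externalId=" + externalId);
    }
};

// ENDPOINT_REGION -> "endpointRegion", EXTERNAL_ID -> "externalId"
NestedPropertyKey.parse(processor, "endpointRegion=us-west-2", "externalId=example-id");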
@@ -1,53 +0,0 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import software.amazon.awssdk.regions.Region;

/**
 * Defines methods to process {@link NestedPropertyKey}s.
 */
public interface NestedPropertyProcessor {

    /**
     * Set the service endpoint where requests are sent.
     *
     * @param serviceEndpoint the service endpoint either with or without the protocol
     *     (e.g., https://sns.us-west-1.amazonaws.com, sns.us-west-1.amazonaws.com)
     * @param signingRegion the region to use for the client (e.g. us-west-1)
     *
     * @see #acceptEndpointRegion(Region)
     * @see <a href="https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/core/client/builder/SdkClientBuilder.html#endpointOverride(java.net.URI)">
     *     AwsClientBuilder.endpointOverride</a>
     */
    void acceptEndpoint(String serviceEndpoint, String signingRegion);

    /**
     * Set the service endpoint where requests are sent.
     *
     * @param region Region to be used by the client. This will be used to determine both the service endpoint
     *     (e.g., https://sns.us-west-1.amazonaws.com) and signing region (e.g., us-west-1) for requests.
     *
     * @see #acceptEndpoint(String, String)
     */
    void acceptEndpointRegion(Region region);

    /**
     * Set the external id, an optional field to designate who can assume an IAM role.
     *
     * @param externalId external id used in the service call used to retrieve session credentials
     */
    void acceptExternalId(String externalId);
}
@@ -1,75 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang.auth;

import java.net.URI;
import java.util.Arrays;

import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sts.StsClient;
import software.amazon.awssdk.services.sts.StsClientBuilder;
import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
import software.amazon.awssdk.services.sts.model.AssumeRoleRequest;
import software.amazon.awssdk.services.sts.model.AssumeRoleRequest.Builder;
import software.amazon.kinesis.multilang.NestedPropertyKey;
import software.amazon.kinesis.multilang.NestedPropertyProcessor;

public class KclStsAssumeRoleCredentialsProvider implements AwsCredentialsProvider, NestedPropertyProcessor {
    private final Builder assumeRoleRequestBuilder;
    private final StsClientBuilder stsClientBuilder;
    private final StsAssumeRoleCredentialsProvider stsAssumeRoleCredentialsProvider;

    public KclStsAssumeRoleCredentialsProvider(String[] params) {
        this(params[0], params[1], Arrays.copyOfRange(params, 2, params.length));
    }

    public KclStsAssumeRoleCredentialsProvider(String roleArn, String roleSessionName, String... params) {
        this.assumeRoleRequestBuilder =
                AssumeRoleRequest.builder().roleArn(roleArn).roleSessionName(roleSessionName);
        this.stsClientBuilder = StsClient.builder();
        NestedPropertyKey.parse(this, params);
        this.stsAssumeRoleCredentialsProvider = StsAssumeRoleCredentialsProvider.builder()
                .refreshRequest(assumeRoleRequestBuilder.build())
                .asyncCredentialUpdateEnabled(true)
                .stsClient(stsClientBuilder.build())
                .build();
    }

    @Override
    public AwsCredentials resolveCredentials() {
        return stsAssumeRoleCredentialsProvider.resolveCredentials();
    }

    @Override
    public void acceptEndpoint(String serviceEndpoint, String signingRegion) {
        if (!serviceEndpoint.startsWith("http://") && !serviceEndpoint.startsWith("https://")) {
            serviceEndpoint = "https://" + serviceEndpoint;
        }
        stsClientBuilder.endpointOverride(URI.create(serviceEndpoint));
        stsClientBuilder.region(Region.of(signingRegion));
    }

    @Override
    public void acceptEndpointRegion(Region region) {
        stsClientBuilder.region(region);
    }

    @Override
    public void acceptExternalId(String externalId) {
        assumeRoleRequestBuilder.externalId(externalId);
    }
}
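In practice this provider is instantiated reflectively from the AwsCredentialsProvider property (see the decoder below), but constructing it directly shows how the pipe-delimited arguments map onto the constructor. The role ARN, session name, and nested values in this sketch are hypothetical placeholders.

// Sketch only; credentials are fetched (and refreshed asynchronously) via STS on demand.
AwsCredentialsProvider provider = new KclStsAssumeRoleCredentialsProvider(
        "arn:aws:iam::123456789012:role/ExampleRole", // roleArn (placeholder)
        "exampleSession",                             // roleSessionName (placeholder)
        "externalId=example-external-id",             // optional nested properties
        "endpointRegion=us-west-2");
AwsCredentials credentials = provider.resolveCredentials();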
@@ -1,261 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang.config;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;
import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
import software.amazon.kinesis.multilang.auth.KclStsAssumeRoleCredentialsProvider;

/**
 * Get AwsCredentialsProvider property.
 */
@Slf4j
class AwsCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder<AwsCredentialsProvider> {
    private static final String LIST_DELIMITER = ",";
    private static final String ARG_DELIMITER = "|";

    /**
     * Constructor.
     */
    AwsCredentialsProviderPropertyValueDecoder() {}

    /**
     * Get AwsCredentialsProvider property.
     *
     * @param value
     *            property value as String
     * @return corresponding variable in correct type
     */
    @Override
    public AwsCredentialsProvider decodeValue(String value) {
        if (value != null) {
            List<String> providerNames = getProviderNames(value);
            List<AwsCredentialsProvider> providers = getValidCredentialsProviders(providerNames);
            AwsCredentialsProvider[] ps = new AwsCredentialsProvider[providers.size()];
            providers.toArray(ps);
            if (providers.isEmpty()) {
                log.warn("Unable to construct any provider with name {}", value);
                log.warn("Please verify that all AwsCredentialsProvider properties are passed correctly");
            }
            return AwsCredentialsProviderChain.builder()
                    .credentialsProviders(providers)
                    .build();
        } else {
            throw new IllegalArgumentException("Property AwsCredentialsProvider is missing.");
        }
    }

    /**
     * @return list of supported types
     */
    @Override
    public List<Class<AwsCredentialsProvider>> getSupportedTypes() {
        return Collections.singletonList(AwsCredentialsProvider.class);
    }

    /**
     * Convert string list to a list of valid credentials providers.
     */
    private static List<AwsCredentialsProvider> getValidCredentialsProviders(List<String> providerNames) {
        List<AwsCredentialsProvider> credentialsProviders = new ArrayList<>();

        for (String providerName : providerNames) {
            final String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER);
            final Class<? extends AwsCredentialsProvider> clazz = getClass(nameAndArgs[0]);
            if (clazz == null) {
                continue;
            }
            log.info("Attempting to construct {}", clazz);
            final String[] varargs =
                    nameAndArgs.length > 1 ? Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length) : new String[0];
            AwsCredentialsProvider provider = tryConstructor(providerName, clazz, varargs);
            if (provider == null) {
                provider = tryCreate(providerName, clazz, varargs);
            }
            if (provider != null) {
                log.info("Provider constructed successfully: {}", provider);
                credentialsProviders.add(provider);
            }
        }
        return credentialsProviders;
    }

    private static AwsCredentialsProvider tryConstructor(
            String providerName, Class<? extends AwsCredentialsProvider> clazz, String[] varargs) {
        AwsCredentialsProvider provider =
                constructProvider(providerName, () -> getConstructorWithVarArgs(clazz, varargs));
        if (provider == null) {
            provider = constructProvider(providerName, () -> getConstructorWithArgs(clazz, varargs));
        }
        if (provider == null) {
            provider = constructProvider(providerName, clazz::newInstance);
        }
        return provider;
    }

    private static AwsCredentialsProvider tryCreate(
            String providerName, Class<? extends AwsCredentialsProvider> clazz, String[] varargs) {
        AwsCredentialsProvider provider =
                constructProvider(providerName, () -> getCreateMethod(clazz, (Object) varargs));
        if (provider == null) {
            provider = constructProvider(providerName, () -> getCreateMethod(clazz, varargs));
        }
        if (provider == null) {
            provider = constructProvider(providerName, () -> getCreateMethod(clazz));
        }
        return provider;
    }

    private static AwsCredentialsProvider getConstructorWithVarArgs(
            Class<? extends AwsCredentialsProvider> clazz, String[] varargs) {
        try {
            return clazz.getConstructor(String[].class).newInstance((Object) varargs);
        } catch (Exception e) {
            return null;
        }
    }

    private static AwsCredentialsProvider getConstructorWithArgs(
            Class<? extends AwsCredentialsProvider> clazz, String[] varargs) {
        try {
            Class<?>[] argTypes = new Class<?>[varargs.length];
            Arrays.fill(argTypes, String.class);
            return clazz.getConstructor(argTypes).newInstance((Object[]) varargs);
        } catch (Exception e) {
            return null;
        }
    }

    private static AwsCredentialsProvider getCreateMethod(
            Class<? extends AwsCredentialsProvider> clazz, Object... args) {
        try {
            Class<?>[] argTypes = new Class<?>[args.length];
            for (int i = 0; i < args.length; i++) {
                argTypes[i] = args[i].getClass();
            }
            Method createMethod = clazz.getDeclaredMethod("create", argTypes);
            if (Modifier.isStatic(createMethod.getModifiers())) {
                return clazz.cast(createMethod.invoke(null, args));
            } else {
                log.warn("Found non-static create() method in {}", clazz.getName());
            }
        } catch (NoSuchMethodException e) {
            // No matching create method found for class
        } catch (Exception e) {
            log.warn("Failed to invoke create() method in {}", clazz.getName(), e);
        }
        return null;
    }

    /**
     * Resolves the class for the given provider name.
     *
     * @param providerName A string containing the provider name.
     *
     * @return The Class object representing the resolved AwsCredentialsProvider implementation,
     *         or null if the class cannot be resolved or does not extend AwsCredentialsProvider.
     */
    private static Class<? extends AwsCredentialsProvider> getClass(String providerName) {
        // Convert any form of StsAssumeRoleCredentialsProvider string to KclStsAssumeRoleCredentialsProvider
        if (providerName.equals(StsAssumeRoleCredentialsProvider.class.getSimpleName())
                || providerName.equals(StsAssumeRoleCredentialsProvider.class.getName())) {
            providerName = KclStsAssumeRoleCredentialsProvider.class.getName();
        }
        try {
            final Class<?> c = Class.forName(providerName);
            if (!AwsCredentialsProvider.class.isAssignableFrom(c)) {
                return null;
            }
            return (Class<? extends AwsCredentialsProvider>) c;
        } catch (ClassNotFoundException cnfe) {
            // Providers are a product of prefixed Strings to cover multiple
            // namespaces (e.g., "Foo" -> { "some.auth.Foo", "kcl.auth.Foo" }).
            // It's expected that many class names will not resolve.
            return null;
        }
    }

    private static List<String> getProviderNames(String property) {
        // assume list delimiter is ","
        String[] elements = property.split(LIST_DELIMITER);
        List<String> result = new ArrayList<>();
        for (int i = 0; i < elements.length; i++) {
            String string = elements[i].trim();
            if (!string.isEmpty()) {
                // find all possible names and add them to name list
                result.addAll(getPossibleFullClassNames(string));
            }
        }
        return result;
    }

    private static List<String> getPossibleFullClassNames(final String provider) {
        return Stream.of(
                        // Customer provides a short name of a provider offered by this multi-lang package
                        "software.amazon.kinesis.multilang.auth.",
                        // Customer provides a short name of common providers in software.amazon.awssdk.auth.credentials
                        // package (e.g., any classes implementing the AwsCredentialsProvider interface)
                        // @see
                        // https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/auth/credentials/AwsCredentialsProvider.html
                        "software.amazon.awssdk.auth.credentials.",
                        // Customer provides a fully-qualified provider name, or a custom credentials provider
                        // (e.g., org.mycompany.FooProvider)
                        "")
                .map(prefix -> prefix + provider)
                .collect(Collectors.toList());
    }

    @FunctionalInterface
    private interface CredentialsProviderConstructor<T extends AwsCredentialsProvider> {
        T construct()
                throws IllegalAccessException, InstantiationException, InvocationTargetException, NoSuchMethodException;
    }

    /**
     * Attempts to construct an {@link AwsCredentialsProvider}.
     *
     * @param providerName Raw, unmodified provider name. Should there be an
     *            Exception during construction, this parameter will be logged.
     * @param constructor supplier-like function that will perform the construction
     * @return the constructed provider, if successful; otherwise, null
     *
     * @param <T> type of the CredentialsProvider to construct
     */
    private static <T extends AwsCredentialsProvider> T constructProvider(
            final String providerName, final CredentialsProviderConstructor<T> constructor) {
        try {
            return constructor.construct();
        } catch (NoSuchMethodException
                | IllegalAccessException
                | InstantiationException
                | InvocationTargetException
                | RuntimeException ignored) {
            // ignore
        }
        return null;
    }
}
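A sketch of the property format the decoder accepts, with hypothetical role details: providers are comma-separated, constructor arguments are pipe-separated, short names are expanded against the package prefixes listed in getPossibleFullClassNames, and StsAssumeRoleCredentialsProvider is transparently remapped to KclStsAssumeRoleCredentialsProvider. The decoder class is package-private, so this assumes code in the same config package.

// Equivalent to a properties entry such as:
//   AwsCredentialsProvider = StsAssumeRoleCredentialsProvider|<roleArn>|<sessionName>,DefaultCredentialsProvider
AwsCredentialsProviderPropertyValueDecoder decoder = new AwsCredentialsProviderPropertyValueDecoder();
AwsCredentialsProvider chain = decoder.decodeValue(
        "StsAssumeRoleCredentialsProvider|arn:aws:iam::123456789012:role/ExampleRole|exampleSession,"
                + "DefaultCredentialsProvider");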
@@ -1,292 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;

import lombok.Getter;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.beanutils.DynaBean;
import org.apache.commons.beanutils.DynaClass;
import org.apache.commons.beanutils.DynaProperty;
import org.apache.commons.lang3.ClassUtils;
import org.apache.commons.lang3.StringUtils;

public class BuilderDynaBean implements DynaBean {

    private static final String[] CLASS_NAME_JOINERS = {ClassUtils.PACKAGE_SEPARATOR, ClassUtils.INNER_CLASS_SEPARATOR};
    static final String NO_MAP_ACCESS_SUPPORT = "Map access isn't supported";

    private Class<?> destinedClass;
    private final ConvertUtilsBean convertUtilsBean;
    private final List<String> classPrefixSearchList;

    private DynaBeanCreateSupport dynaBeanCreateSupport;
    private DynaBeanBuilderSupport dynaBeanBuilderSupport;

    @Getter
    private boolean isDirty = false;

    private final Function<String, ?> emptyPropertyHandler;
    private Object emptyPropertyResolved = null;

    public BuilderDynaBean(Class<?> destinedClass, ConvertUtilsBean convertUtilsBean, String... classPrefixSearchList) {
        this(destinedClass, convertUtilsBean, null, Arrays.asList(classPrefixSearchList));
    }

    public BuilderDynaBean(
            Class<?> destinedClass,
            ConvertUtilsBean convertUtilsBean,
            Function<String, ?> emptyPropertyHandler,
            String... classPrefixSearchList) {
        this(destinedClass, convertUtilsBean, emptyPropertyHandler, Arrays.asList(classPrefixSearchList));
    }

    public BuilderDynaBean(
            Class<?> destinedClass,
            ConvertUtilsBean convertUtilsBean,
            Function<String, ?> emptyPropertyHandler,
            List<String> classPrefixSearchList) {
        this.convertUtilsBean = convertUtilsBean;
        this.classPrefixSearchList = classPrefixSearchList;
        this.emptyPropertyHandler = emptyPropertyHandler;
        initialize(destinedClass);
    }

    private void initialize(Class<?> destinedClass) {
        this.destinedClass = destinedClass;

        if (DynaBeanBuilderUtils.isBuilderOrCreate(destinedClass)) {
            dynaBeanBuilderSupport = new DynaBeanBuilderSupport(destinedClass, convertUtilsBean, classPrefixSearchList);
            dynaBeanCreateSupport = new DynaBeanCreateSupport(destinedClass, convertUtilsBean, classPrefixSearchList);
        }
    }

    private void reinitializeFrom(String newClass) {
        Class<?> newClazz = null;
        List<String> attempts = new ArrayList<>();
        attempts.add(newClass);
        try {
            newClazz = Class.forName(newClass);
        } catch (ClassNotFoundException e) {
            //
            // Ignored
            //
        }
        if (newClazz == null) {
            for (String prefix : classPrefixSearchList) {
                for (String joiner : CLASS_NAME_JOINERS) {
                    String possibleClass;
                    if (prefix.endsWith(joiner)) {
                        possibleClass = prefix + newClass;
                    } else {
                        possibleClass = prefix + joiner + newClass;
                    }
                    attempts.add(possibleClass);
                    try {
                        newClazz = Class.forName(possibleClass);
                        break;
                    } catch (ClassNotFoundException e) {
                        //
                        // Ignored
                        //
                    }
                }
            }
        }

        if (newClazz == null) {
            throw new IllegalArgumentException(
                    "Unable to load class " + newClass + ". Attempted: (" + String.join(", ", attempts) + ")");
        }
        initialize(newClazz);
    }

    private void validatedExpectedClass(Class<?> source, Class<?> expected) {
        if (!ClassUtils.isAssignable(source, expected)) {
            throw new IllegalArgumentException(
                    String.format("%s cannot be assigned to %s.", source.getName(), expected.getName()));
        }
    }

    public boolean canBuildOrCreate() {
        return dynaBeanBuilderSupport != null || dynaBeanCreateSupport != null;
    }

    private void validateCanBuildOrCreate() {
        if (!canBuildOrCreate()) {
            throw new IllegalStateException("Unable to introspect or handle " + destinedClass.getName()
                    + " as it doesn't have a builder or create method.");
        }
    }

    @SafeVarargs
    public final <T> T build(Class<T> expected, Function<Object, Object>... additionalMutators) {
        if (emptyPropertyResolved != null) {
            validatedExpectedClass(emptyPropertyResolved.getClass(), expected);
            return expected.cast(emptyPropertyResolved);
        }

        if (dynaBeanBuilderSupport == null && dynaBeanCreateSupport == null) {
            return null;
        }

        validatedExpectedClass(destinedClass, expected);
        if (dynaBeanBuilderSupport.isValid()) {
            return expected.cast(dynaBeanBuilderSupport.build(additionalMutators));
        } else {
            return expected.cast(dynaBeanCreateSupport.build());
        }
    }

    private void validateResolvedEmptyHandler() {
        if (emptyPropertyResolved != null) {
            throw new IllegalStateException("When a property handler is resolved further properties may not be set.");
        }
    }

    boolean hasValue(String name) {
        if (dynaBeanBuilderSupport != null) {
            return dynaBeanBuilderSupport.hasValue(name);
        }
        return false;
    }

    @Override
    public boolean contains(String name, String key) {
        throw new UnsupportedOperationException(NO_MAP_ACCESS_SUPPORT);
    }

    @Override
    public Object get(String name) {
        validateResolvedEmptyHandler();
        isDirty = true;
        return dynaBeanBuilderSupport.get(name);
    }

    @Override
    public Object get(String name, int index) {
        validateResolvedEmptyHandler();
        isDirty = true;
        if (StringUtils.isEmpty(name)) {
            return dynaBeanCreateSupport.get(name, index);
        }
        return dynaBeanBuilderSupport.get(name, index);
    }

    @Override
    public Object get(String name, String key) {
        throw new UnsupportedOperationException(NO_MAP_ACCESS_SUPPORT);
    }

    @Override
    public DynaClass getDynaClass() {
        return new DynaClass() {
            @Override
            public String getName() {
                return destinedClass.getName();
            }

            @Override
            public DynaProperty getDynaProperty(String name) {
                if (StringUtils.isEmpty(name)) {
                    return new DynaProperty(name);
                }
                if ("class".equals(name)) {
                    return new DynaProperty(name, String.class);
                }
                //
                // We delay validation until after the class check to allow for re-initialization for a specific class.
                // The check for isEmpty is allowed ahead of this check to allow for raw string support.
                //
                validateCanBuildOrCreate();
                List<TypeTag> types = dynaBeanBuilderSupport.getProperty(name);
                if (types.size() > 1) {
                    Optional<TypeTag> arrayType =
                            types.stream().filter(t -> t.type.isArray()).findFirst();
                    return arrayType
                            .map(t -> new DynaProperty(name, t.type, t.type.getComponentType()))
                            .orElseGet(() -> new DynaProperty(name));
                } else {
                    TypeTag type = types.get(0);
                    if (type.hasConverter) {
                        return new DynaProperty(name, type.type);
                    }
                    if (type.type.isEnum()) {
                        return new DynaProperty(name, String.class);
                    }
                    return new DynaProperty(name, BuilderDynaBean.class);
                }
            }

            @Override
            public DynaProperty[] getDynaProperties() {
                validateCanBuildOrCreate();
                return dynaBeanBuilderSupport.getPropertyNames().stream()
                        .map(this::getDynaProperty)
                        .toArray(DynaProperty[]::new);
            }

            @Override
            public DynaBean newInstance() {
                return null;
            }
        };
    }

    @Override
    public void remove(String name, String key) {
        throw new UnsupportedOperationException(NO_MAP_ACCESS_SUPPORT);
    }

    @Override
    public void set(String name, Object value) {
        validateResolvedEmptyHandler();
        isDirty = true;
        if (emptyPropertyHandler != null && StringUtils.isEmpty(name) && value instanceof String) {
            emptyPropertyResolved = emptyPropertyHandler.apply((String) value);
            return;
        }

        if ("class".equals(name)) {
            reinitializeFrom(value.toString());
        } else {
            validateResolvedEmptyHandler();
            dynaBeanBuilderSupport.set(name, value);
        }
    }

    @Override
    public void set(String name, int index, Object value) {
        validateResolvedEmptyHandler();
        validateCanBuildOrCreate();
        isDirty = true;
        if (StringUtils.isEmpty(name)) {
            dynaBeanCreateSupport.set(name, index, value);
        } else {
            dynaBeanBuilderSupport.set(name, index, value);
        }
    }

    @Override
    public void set(String name, String key, Object value) {
        throw new UnsupportedOperationException(NO_MAP_ACCESS_SUPPORT);
    }
}
@@ -1,49 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.annotation.ElementType;
import java.lang.annotation.Repeatable;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
@Repeatable(ConfigurationSettables.class)
public @interface ConfigurationSettable {

    /**
     * Which builder this option applies to.
     *
     * @return the class of the builder to use
     */
    Class<?> configurationClass();

    /**
     * The method name on the builder; defaults to the field name.
     *
     * @return the name of the method, or an empty string to use the field name
     */
    String methodName() default "";

    /**
     * Whether the value should be wrapped in an {@link java.util.Optional} before being passed to the builder.
     *
     * @return true if the value should be wrapped in an Optional
     */
    boolean convertToOptional() default false;
}
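For context, a hypothetical bean sketch (field names borrowed from MultiLangDaemonConfiguration later in this diff) showing the two ways the annotation is applied: the field name doubling as the builder method name, and `methodName` overriding it.

```java
import lombok.Getter;
import lombok.Setter;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.leases.LeaseManagementConfig;

@Getter
@Setter
public class ExampleLeaseConfigBean {

    // Applied by calling LeaseManagementConfig#failoverTimeMillis(long) when non-default.
    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private long failoverTimeMillis;

    // Applied by calling LeaseManagementConfig#initialPositionInStream(...),
    // because methodName overrides the field name.
    @ConfigurationSettable(
            configurationClass = LeaseManagementConfig.class,
            methodName = "initialPositionInStream")
    private InitialPositionInStreamExtended initialPositionInStreamExtended;
}
```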
@@ -1,122 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

import com.google.common.base.Defaults;
import lombok.NonNull;
import org.apache.commons.lang3.ClassUtils;
import org.apache.commons.lang3.StringUtils;

public class ConfigurationSettableUtils {

    public static <T> T resolveFields(@NonNull Object source, @NonNull T configObject) {
        Map<Class<?>, Object> configObjects = new HashMap<>();
        configObjects.put(configObject.getClass(), configObject);
        resolveFields(source, configObjects, null, null);

        return configObject;
    }

    public static void resolveFields(
            Object source, Map<Class<?>, Object> configObjects, Set<Class<?>> restrictTo, Set<Class<?>> skipIf) {
        for (Field field : source.getClass().getDeclaredFields()) {
            for (ConfigurationSettable b : field.getAnnotationsByType(ConfigurationSettable.class)) {
                if (restrictTo != null && !restrictTo.contains(b.configurationClass())) {
                    continue;
                }
                if (skipIf != null && skipIf.contains(b.configurationClass())) {
                    continue;
                }
                field.setAccessible(true);
                Object configObject = configObjects.get(b.configurationClass());
                if (configObject != null) {
                    String setterName = field.getName();
                    if (!StringUtils.isEmpty(b.methodName())) {
                        setterName = b.methodName();
                    }
                    Object value;
                    try {
                        value = field.get(source);
                    } catch (IllegalAccessException e) {
                        throw new RuntimeException(e);
                    }

                    if (value != null && !value.equals(Defaults.defaultValue(field.getType()))) {
                        Method setter = null;
                        if (b.convertToOptional()) {
                            value = Optional.of(value);
                        }
                        if (ClassUtils.isPrimitiveOrWrapper(value.getClass())) {
                            Class<?> primitiveType = field.getType().isPrimitive()
                                    ? field.getType()
                                    : ClassUtils.wrapperToPrimitive(field.getType());
                            Class<?> wrapperType = !field.getType().isPrimitive()
                                    ? field.getType()
                                    : ClassUtils.primitiveToWrapper(field.getType());

                            try {
                                setter = b.configurationClass().getMethod(setterName, primitiveType);
                            } catch (NoSuchMethodException e) {
                                //
                                // Ignore this
                                //
                            }
                            if (setter == null) {
                                try {
                                    setter = b.configurationClass().getMethod(setterName, wrapperType);
                                } catch (NoSuchMethodException e) {
                                    throw new RuntimeException(e);
                                }
                            }
                        } else {
                            try {
                                setter = b.configurationClass().getMethod(setterName, value.getClass());
                            } catch (NoSuchMethodException e) {
                                // Find a setter whose single parameter is not the exact type
                                // but is assignable from the value's type.
                                for (Method method : b.configurationClass().getMethods()) {
                                    Class<?>[] parameterTypes = method.getParameterTypes();
                                    if (method.getName().equals(setterName)
                                            && parameterTypes.length == 1
                                            && parameterTypes[0].isAssignableFrom(value.getClass())) {
                                        setter = method;
                                        break;
                                    }
                                }
                                if (setter == null) {
                                    throw new RuntimeException(e);
                                }
                            }
                        }
                        try {
                            setter.invoke(configObject, value);
                        } catch (IllegalAccessException | InvocationTargetException e) {
                            throw new RuntimeException(e);
                        }
                    }
                }
            }
        }
    }
}
@@ -1,27 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface ConfigurationSettables {
    ConfigurationSettable[] value();
}
@@ -1,82 +0,0 @@

/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import lombok.Getter;
import lombok.Setter;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.kinesis.coordinator.CoordinatorConfig.CoordinatorStateTableConfig;
import software.amazon.kinesis.multilang.config.converter.TagConverter.TagCollection;

@Getter
@Setter
public class CoordinatorStateTableConfigBean {

    interface CoordinatorStateConfigBeanDelegate {
        String getCoordinatorStateTableName();

        void setCoordinatorStateTableName(String value);

        BillingMode getCoordinatorStateBillingMode();

        void setCoordinatorStateBillingMode(BillingMode value);

        long getCoordinatorStateReadCapacity();

        void setCoordinatorStateReadCapacity(long value);

        long getCoordinatorStateWriteCapacity();

        void setCoordinatorStateWriteCapacity(long value);

        Boolean getCoordinatorStatePointInTimeRecoveryEnabled();

        void setCoordinatorStatePointInTimeRecoveryEnabled(Boolean value);

        Boolean getCoordinatorStateDeletionProtectionEnabled();

        void setCoordinatorStateDeletionProtectionEnabled(Boolean value);

        TagCollection getCoordinatorStateTags();

        void setCoordinatorStateTags(TagCollection value);
    }

    @ConfigurationSettable(configurationClass = CoordinatorStateTableConfig.class, methodName = "tableName")
    private String coordinatorStateTableName;

    @ConfigurationSettable(configurationClass = CoordinatorStateTableConfig.class, methodName = "billingMode")
    private BillingMode coordinatorStateBillingMode;

    @ConfigurationSettable(configurationClass = CoordinatorStateTableConfig.class, methodName = "readCapacity")
    private long coordinatorStateReadCapacity;

    @ConfigurationSettable(configurationClass = CoordinatorStateTableConfig.class, methodName = "writeCapacity")
    private long coordinatorStateWriteCapacity;

    @ConfigurationSettable(
            configurationClass = CoordinatorStateTableConfig.class,
            methodName = "pointInTimeRecoveryEnabled")
    private Boolean coordinatorStatePointInTimeRecoveryEnabled;

    @ConfigurationSettable(
            configurationClass = CoordinatorStateTableConfig.class,
            methodName = "deletionProtectionEnabled")
    private Boolean coordinatorStateDeletionProtectionEnabled;

    @ConfigurationSettable(configurationClass = CoordinatorStateTableConfig.class, methodName = "tags")
    private TagCollection coordinatorStateTags;
}
@@ -1,258 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.reflect.Array;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.lang3.ClassUtils;

class DynaBeanBuilderSupport {

    private static final String BUILD_METHOD_NAME = "build";
    private static final String BUILDER_METHOD_NAME = "builder";

    private final Class<?> destinedClass;
    private final ConvertUtilsBean convertUtilsBean;
    private final List<String> classPrefixSearchList;
    private final Class<?> builderClass;

    private final Multimap<String, TypeTag> properties = HashMultimap.create();
    private final Map<String, Object> values = new HashMap<>();

    DynaBeanBuilderSupport(
            Class<?> destinedClass, ConvertUtilsBean convertUtilsBean, List<String> classPrefixSearchList) {
        this.destinedClass = destinedClass;
        this.convertUtilsBean = convertUtilsBean;
        this.classPrefixSearchList = classPrefixSearchList;
        this.builderClass = builderClassFrom(destinedClass);

        buildProperties();
    }

    private static Class<?> builderClassFrom(Class<?> destinedClass) {
        Method builderMethod;
        try {
            builderMethod = destinedClass.getMethod(BUILDER_METHOD_NAME);
        } catch (NoSuchMethodException e) {
            return null;
        }

        return builderMethod.getReturnType();
    }

    private void buildProperties() {
        if (builderClass == null) {
            return;
        }
        try {
            builderClass.getMethod(BUILD_METHOD_NAME);
        } catch (NoSuchMethodException e) {
            throw new RuntimeException(e);
        }

        for (Method method : builderClass.getMethods()) {
            if (method.getParameterCount() == 1 && ClassUtils.isAssignable(builderClass, method.getReturnType())) {
                Class<?> paramType = method.getParameterTypes()[0];
                if (Supplier.class.isAssignableFrom(paramType) || Consumer.class.isAssignableFrom(paramType)) {
                    continue;
                }
                if (paramType.isEnum()) {
                    properties.put(method.getName(), new TypeTag(paramType, true, method));
                } else if (convertUtilsBean.lookup(paramType) == null) {
                    properties.put(method.getName(), new TypeTag(paramType, false, method));
                } else {
                    properties.put(method.getName(), new TypeTag(paramType, true, method));
                }
            }
        }
    }

    boolean isValid() {
        return builderClass != null;
    }

    private Object createForProperty(String name) {
        Optional<TypeTag> type = properties.get(name).stream().findFirst();
        return type.map(t -> {
                    if (DynaBeanBuilderUtils.isBuilderOrCreate(t.type) || !t.hasConverter) {
                        return new BuilderDynaBean(t.type, convertUtilsBean, null, classPrefixSearchList);
                    }
                    return null;
                })
                .orElse(null);
    }

    boolean hasValue(String name) {
        return values.containsKey(name);
    }

    Object get(String name) {
        if (values.containsKey(name)) {
            return values.get(name);
        }
        Object value = createForProperty(name);
        if (value != null) {
            values.put(name, value);
        }
        return values.get(name);
    }

    private Object[] retrieveAndResizeArray(String name, int index) {
        Object existing = values.get(name);
        Object[] destination;
        if (existing != null) {
            if (!existing.getClass().isArray()) {
                throw new IllegalStateException("Requested get for an array, but existing value isn't an array");
            }
            destination = (Object[]) existing;
            if (index >= destination.length) {
                destination = Arrays.copyOf(destination, index + 1);
                values.put(name, destination);
            }

        } else {
            destination = new Object[index + 1];
            values.put(name, destination);
        }

        return destination;
    }

    Object get(String name, int index) {
        Object[] destination = retrieveAndResizeArray(name, index);

        if (destination[index] == null) {
            destination[index] = createForProperty(name);
        }
        return destination[index];
    }

    void set(String name, Object value) {
        if (value instanceof String && properties.get(name).stream().anyMatch(t -> t.type.isEnum())) {
            TypeTag typeTag = properties.get(name).stream()
                    .filter(t -> t.type.isEnum())
                    .findFirst()
                    .orElseThrow(() ->
                            new IllegalStateException("Expected enum type for " + name + ", but couldn't find it."));
            Class<? extends Enum> enumClass = (Class<? extends Enum>) typeTag.type;
            values.put(name, Enum.valueOf(enumClass, value.toString()));
        } else {
            values.put(name, value);
        }
    }

    void set(String name, int index, Object value) {
        Object[] destination = retrieveAndResizeArray(name, index);
        destination[index] = value;
    }

    private Object getArgument(Map.Entry<String, Object> setValue) {
        Object argument = setValue.getValue();
        if (argument instanceof Object[]) {
            TypeTag arrayType = properties.get(setValue.getKey()).stream()
                    .filter(t -> t.type.isArray())
                    .findFirst()
                    .orElseThrow(() -> new IllegalStateException(String.format(
                            "Received Object[] for %s but can't find corresponding type", setValue.getKey())));
            Object[] arrayValues = (Object[]) argument;
            Object[] destination = (Object[]) Array.newInstance(arrayType.type.getComponentType(), arrayValues.length);

            for (int i = 0; i < arrayValues.length; ++i) {
                if (arrayValues[i] instanceof BuilderDynaBean) {
                    destination[i] = ((BuilderDynaBean) arrayValues[i]).build(Object.class);
                } else {
                    destination[i] = arrayValues[i];
                }
            }

            return destination;
        }
        if (argument instanceof BuilderDynaBean) {
            argument = ((BuilderDynaBean) argument).build(Object.class);
        }
        return argument;
    }

    Object build(Function<Object, Object>... additionalMutators) {
        Method builderMethod;
        try {
            builderMethod = destinedClass.getMethod(BUILDER_METHOD_NAME);
        } catch (NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
        Object source;
        try {
            source = builderMethod.invoke(null);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new RuntimeException(e);
        }
        for (Map.Entry<String, Object> setValue : values.entrySet()) {
            Object argument = getArgument(setValue);
            Method mutator = properties.get(setValue.getKey()).stream()
                    .filter(t -> ClassUtils.isAssignable(argument.getClass(), t.type))
                    .findFirst()
                    .map(a -> a.builderMethod)
                    .orElseThrow(() -> new IllegalStateException(String.format(
                            "Unable to find mutator for %s of type %s",
                            setValue.getKey(), argument.getClass().getName())));
            try {
                source = mutator.invoke(source, argument);
            } catch (IllegalAccessException | InvocationTargetException e) {
                throw new RuntimeException(e);
            }
        }

        if (additionalMutators != null) {
            for (Function<Object, Object> mutator : additionalMutators) {
                source = mutator.apply(source);
            }
        }

        Method buildMethod;
        try {
            buildMethod = builderClass.getMethod(BUILD_METHOD_NAME);
            return buildMethod.invoke(source);
        } catch (IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
            throw new RuntimeException(e);
        }
    }

    Collection<String> getPropertyNames() {
        return properties.keySet();
    }

    List<TypeTag> getProperty(String name) {
        if (!properties.containsKey(name)) {
            throw new IllegalArgumentException("Unknown property: " + name);
        }
        return new ArrayList<>(properties.get(name));
    }
}
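The builder contract this class discovers reflectively (a static builder(), chainable one-argument methods, a zero-argument build()) is the same shape the AWS SDK v2 clients expose; a small hand-written equivalent, with an assumed region, looks like this.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;

public class BuilderContractSketch {
    public static void main(String[] args) {
        // The same three steps DynaBeanBuilderSupport performs through reflection:
        KinesisAsyncClient client = KinesisAsyncClient.builder() // static builder() returns the builder type
                .region(Region.US_WEST_2)                        // one-argument method that returns the builder
                .build();                                        // zero-argument build() yields the destined class
        client.close();
    }
}
```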
@@ -1,56 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Arrays;

public class DynaBeanBuilderUtils {

    static Method getMethod(Class<?> clazz, String name, Class<?>... parameterTypes) {
        try {
            return clazz.getMethod(name, parameterTypes);
        } catch (NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
    }

    static Object invokeOrFail(Method method, Object onObject, Object... arguments) {
        try {
            return method.invoke(onObject, arguments);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new RuntimeException(e);
        }
    }

    static boolean isBuilderOrCreate(Class<?> clazz) {
        Method buildMethod = null;

        try {
            buildMethod = clazz.getMethod("builder");
        } catch (NoSuchMethodException e) {
            //
            // Ignored
            //
        }

        boolean hasCreate = Arrays.stream(clazz.getMethods())
                .anyMatch(m -> "create".equals(m.getName()) && m.getReturnType().isAssignableFrom(clazz));

        return buildMethod != null || hasCreate;
    }
}
@@ -1,99 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.lang3.StringUtils;

class DynaBeanCreateSupport {

    private final Class<?> destinedClass;
    private final ConvertUtilsBean convertUtilsBean;
    private final List<String> classPrefixSearchList;
    private final List<TypeTag> createTypes = new ArrayList<>();
    private Object[] createValues = null;

    DynaBeanCreateSupport(
            Class<?> destinedClass, ConvertUtilsBean convertUtilsBean, List<String> classPrefixSearchList) {
        this.destinedClass = destinedClass;
        this.convertUtilsBean = convertUtilsBean;
        this.classPrefixSearchList = classPrefixSearchList;

        readTypes();
    }

    private void readTypes() {
        for (Method method : destinedClass.getMethods()) {
            if ("create".equals(method.getName()) && method.getReturnType().isAssignableFrom(destinedClass)) {
                createValues = new Object[method.getParameterCount()];
                int i = 0;
                for (Class<?> paramType : method.getParameterTypes()) {
                    if (convertUtilsBean.lookup(paramType) != null) {
                        createTypes.add(new TypeTag(paramType, true, null));
                    } else {
                        createTypes.add(new TypeTag(paramType, false, null));
                    }
                    ++i;
                }
            }
        }
    }

    Object build() {

        Method createMethod = DynaBeanBuilderUtils.getMethod(
                destinedClass, "create", createTypes.stream().map(t -> t.type).toArray(i -> new Class<?>[i]));
        Object[] arguments = new Object[createValues.length];
        for (int i = 0; i < createValues.length; ++i) {
            if (createValues[i] instanceof BuilderDynaBean) {
                arguments[i] = ((BuilderDynaBean) createValues[i]).build(createTypes.get(i).type);
            } else {
                arguments[i] = createValues[i];
            }
        }
        return DynaBeanBuilderUtils.invokeOrFail(createMethod, null, arguments);
    }

    public Object get(String name, int index) {
        if (index < createValues.length) {
            if (createTypes.get(index).hasConverter) {
                return createValues[index];
            } else {
                if (createValues[index] == null) {
                    createValues[index] = new BuilderDynaBean(
                            createTypes.get(index).type, convertUtilsBean, null, classPrefixSearchList);
                }
                return createValues[index];
            }
        }
        return null;
    }

    public void set(String name, int index, Object value) {
        if (StringUtils.isEmpty(name)) {
            if (index >= createValues.length) {
                throw new IllegalArgumentException(String.format(
                        "%d exceeds the maximum number of arguments (%d) for %s",
                        index, createValues.length, destinedClass.getName()));
            }
            createValues[index] = value;
        }
    }
}
@@ -1,53 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import lombok.Getter;
import lombok.Setter;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.retrieval.fanout.FanOutConfig;

@Getter
@Setter
public class FanoutConfigBean implements RetrievalConfigBuilder {

    @ConfigurationSettable(configurationClass = FanOutConfig.class)
    private int maxDescribeStreamSummaryRetries;

    @ConfigurationSettable(configurationClass = FanOutConfig.class)
    private String consumerArn;

    @ConfigurationSettable(configurationClass = FanOutConfig.class)
    private String consumerName;

    @ConfigurationSettable(configurationClass = FanOutConfig.class)
    private int maxDescribeStreamConsumerRetries;

    @ConfigurationSettable(configurationClass = FanOutConfig.class)
    private int registerStreamConsumerRetries;

    @ConfigurationSettable(configurationClass = FanOutConfig.class)
    private long retryBackoffMillis;

    @Override
    public FanOutConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent) {
        return ConfigurationSettableUtils.resolveFields(
                this,
                new FanOutConfig(kinesisAsyncClient)
                        .applicationName(parent.getApplicationName())
                        .streamName(parent.getStreamName()));
    }
}
@@ -1,41 +0,0 @@

/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import lombok.Getter;
import lombok.Setter;
import software.amazon.kinesis.leases.LeaseManagementConfig;

@Getter
@Setter
public class GracefulLeaseHandoffConfigBean {

    interface GracefulLeaseHandoffConfigBeanDelegate {
        Long getGracefulLeaseHandoffTimeoutMillis();

        void setGracefulLeaseHandoffTimeoutMillis(Long value);

        Boolean getIsGracefulLeaseHandoffEnabled();

        void setIsGracefulLeaseHandoffEnabled(Boolean value);
    }

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.GracefulLeaseHandoffConfig.class)
    private Long gracefulLeaseHandoffTimeoutMillis;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.GracefulLeaseHandoffConfig.class)
    private Boolean isGracefulLeaseHandoffEnabled;
}
@@ -1,126 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang.config;

import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.InvocationTargetException;
import java.util.Properties;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.lang3.Validate;
import software.amazon.awssdk.arns.Arn;
import software.amazon.kinesis.common.StreamIdentifier;

/**
 * KinesisClientLibConfigurator constructs a multi-language daemon configuration from a Java properties file. The
 * following three properties must be provided: 1) "applicationName", 2) "streamName", 3) "AwsCredentialsProvider".
 * KinesisClientLibConfigurator will automatically assign a value for "workerId" if that property is not provided. Any
 * property in the specified file that matches a variable name in KinesisClientLibConfiguration and has a
 * corresponding "with{variableName}" setter method will be read in, and its value will be assigned to the
 * corresponding variable in KinesisClientLibConfiguration.
 */
@Slf4j
public class KinesisClientLibConfigurator {
    private final ConvertUtilsBean convertUtilsBean;
    private final BeanUtilsBean utilsBean;
    private final MultiLangDaemonConfiguration configuration;

    /**
     * Constructor.
     */
    public KinesisClientLibConfigurator() {
        this.convertUtilsBean = new ConvertUtilsBean();
        this.utilsBean = new BeanUtilsBean(convertUtilsBean);
        this.configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean);
    }

    /**
     * Returns a configuration with variables set as specified by the provided properties. The program fails
     * immediately if an invalid variable value is provided. It logs a warning and continues if a variable has an
     * unsupported type, or if a property name does not match any variable in KinesisClientLibConfiguration.
     *
     * @param properties a Properties object containing the configuration information
     * @return the populated MultiLangDaemonConfiguration
     */
    public MultiLangDaemonConfiguration getConfiguration(Properties properties) {
        properties.entrySet().forEach(e -> {
            try {
                log.info("Processing (key={}, value={})", e.getKey(), e.getValue());
                utilsBean.setProperty(configuration, processKey((String) e.getKey()), e.getValue());
            } catch (IllegalAccessException | InvocationTargetException ex) {
                throw new RuntimeException(ex);
            }
        });

        Validate.notBlank(configuration.getApplicationName(), "Application name is required");

        if (configuration.getStreamArn() != null
                && !configuration.getStreamArn().trim().isEmpty()) {
            final Arn streamArnObj = Arn.fromString(configuration.getStreamArn());
            StreamIdentifier.validateArn(streamArnObj);
            // Parse out the stream name from the ARN (and/or override the existing value for the stream name)
            final String streamNameFromArn = streamArnObj.resource().resource();
            configuration.setStreamName(streamNameFromArn);
        }

        Validate.notBlank(
                configuration.getStreamName(),
                "Stream name or Stream Arn is required. Stream Arn takes precedence if both are passed in.");
        Validate.isTrue(
                configuration.getKinesisCredentialsProvider().isDirty(),
                "A basic set of AWS credentials must be provided");

        return configuration;
    }

    /**
     * @param configStream the input stream containing the configuration information
     * @return the populated MultiLangDaemonConfiguration
     */
    public MultiLangDaemonConfiguration getConfiguration(InputStream configStream) {
        Properties properties = new Properties();
        try {
            properties.load(configStream);
        } catch (IOException e) {
            String msg = "Could not load properties from the stream provided";
            throw new IllegalStateException(msg, e);
        } finally {
            try {
                configStream.close();
            } catch (IOException e) {
                String msg = "Encountered error while trying to close properties file.";
                throw new IllegalStateException(msg, e);
            }
        }
        return getConfiguration(properties);
    }

    /**
     * Processes a configuration key to normalize AWS credentials provider naming. Necessary to conform to
     * autogenerated setters.
     * @param key the config param key
     * @return the key with normalized casing
     */
    String processKey(String key) {
        if (key.toLowerCase().startsWith("awscredentialsprovider")) {
            key = key.replaceAll("(?i)awscredentialsprovider", "awsCredentialsProvider");
        }
        return key;
    }
}
@@ -1,531 +0,0 @@

/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;

import lombok.Data;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Delegate;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.beanutils.Converter;
import org.apache.commons.beanutils.converters.ArrayConverter;
import org.apache.commons.beanutils.converters.StringConverter;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder;
import software.amazon.kinesis.checkpoint.CheckpointConfig;
import software.amazon.kinesis.common.ConfigsBuilder;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.common.KinesisClientUtil;
import software.amazon.kinesis.coordinator.CoordinatorConfig;
import software.amazon.kinesis.coordinator.Scheduler;
import software.amazon.kinesis.leases.LeaseManagementConfig;
import software.amazon.kinesis.leases.ShardPrioritization;
import software.amazon.kinesis.lifecycle.LifecycleConfig;
import software.amazon.kinesis.metrics.MetricsConfig;
import software.amazon.kinesis.metrics.MetricsLevel;
import software.amazon.kinesis.multilang.config.converter.DurationConverter;
import software.amazon.kinesis.multilang.config.converter.TagConverter;
import software.amazon.kinesis.multilang.config.converter.TagConverter.TagCollection;
import software.amazon.kinesis.processor.ProcessorConfig;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.retrieval.RetrievalConfig;
import software.amazon.kinesis.retrieval.polling.PollingConfig;

@Getter
@Setter
@Slf4j
public class MultiLangDaemonConfiguration {

    private static final String CREDENTIALS_DEFAULT_SEARCH_PATH = "software.amazon.awssdk.auth.credentials";

    private String applicationName;

    private String streamName;
    private String streamArn;

    @ConfigurationSettable(configurationClass = ConfigsBuilder.class)
    private String tableName;

    private String workerIdentifier = UUID.randomUUID().toString();

    public void setWorkerId(String workerId) {
        this.workerIdentifier = workerId;
    }

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private long failoverTimeMillis;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private Boolean enablePriorityLeaseAssignment;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private Boolean leaseTableDeletionProtectionEnabled;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private Boolean leaseTablePitrEnabled;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private long shardSyncIntervalMillis;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private boolean cleanupLeasesUponShardCompletion;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private boolean ignoreUnexpectedChildShards;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private int maxLeasesForWorker;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private int maxLeasesToStealAtOneTime;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private int initialLeaseTableReadCapacity;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private int initialLeaseTableWriteCapacity;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class, methodName = "initialPositionInStream")
    @ConfigurationSettable(configurationClass = RetrievalConfig.class)
    private InitialPositionInStreamExtended initialPositionInStreamExtended;

    public InitialPositionInStream getInitialPositionInStream() {
        if (initialPositionInStreamExtended != null) {
            return initialPositionInStreamExtended.getInitialPositionInStream();
        }
        return null;
    }

    public void setInitialPositionInStream(InitialPositionInStream initialPositionInStream) {
        this.initialPositionInStreamExtended =
                InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream);
    }

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private int maxLeaseRenewalThreads;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private long listShardsBackoffTimeInMillis;

    @ConfigurationSettable(configurationClass = LeaseManagementConfig.class)
    private int maxListShardsRetryAttempts;

    // Enables applications to flush/checkpoint if they have some data "in progress" but don't get new data for a while.
    @ConfigurationSettable(configurationClass = ProcessorConfig.class)
    private boolean callProcessRecordsEvenForEmptyRecordList;

    @ConfigurationSettable(configurationClass = CoordinatorConfig.class)
    private long parentShardPollIntervalMillis;

    @ConfigurationSettable(configurationClass = CoordinatorConfig.class)
    private ShardPrioritization shardPrioritization;

    @ConfigurationSettable(configurationClass = CoordinatorConfig.class)
    private boolean skipShardSyncAtWorkerInitializationIfLeasesExist;

    @ConfigurationSettable(configurationClass = CoordinatorConfig.class)
    private long schedulerInitializationBackoffTimeMillis;

    @ConfigurationSettable(configurationClass = CoordinatorConfig.class)
    private CoordinatorConfig.ClientVersionConfig clientVersionConfig;

    @ConfigurationSettable(configurationClass = LifecycleConfig.class)
    private long taskBackoffTimeMillis;

    @ConfigurationSettable(configurationClass = MetricsConfig.class)
    private long metricsBufferTimeMillis;

    @ConfigurationSettable(configurationClass = MetricsConfig.class)
    private int metricsMaxQueueSize;

    @ConfigurationSettable(configurationClass = MetricsConfig.class)
    private MetricsLevel metricsLevel;

    @ConfigurationSettable(configurationClass = LifecycleConfig.class, convertToOptional = true)
    private Long logWarningForTaskAfterMillis;

    @ConfigurationSettable(configurationClass = MetricsConfig.class)
    private Set<String> metricsEnabledDimensions;

    public String[] getMetricsEnabledDimensions() {
        return metricsEnabledDimensions.toArray(new String[0]);
    }

    public void setMetricsEnabledDimensions(String[] dimensions) {
        metricsEnabledDimensions = new HashSet<>(Arrays.asList(dimensions));
    }

    private RetrievalMode retrievalMode = RetrievalMode.DEFAULT;

    private final FanoutConfigBean fanoutConfig = new FanoutConfigBean();

    @Delegate(types = PollingConfigBean.PollingConfigBeanDelegate.class)
    private final PollingConfigBean pollingConfig = new PollingConfigBean();

    @Delegate(types = GracefulLeaseHandoffConfigBean.GracefulLeaseHandoffConfigBeanDelegate.class)
    private final GracefulLeaseHandoffConfigBean gracefulLeaseHandoffConfigBean = new GracefulLeaseHandoffConfigBean();

    @Delegate(
            types = WorkerUtilizationAwareAssignmentConfigBean.WorkerUtilizationAwareAssignmentConfigBeanDelegate.class)
    private final WorkerUtilizationAwareAssignmentConfigBean workerUtilizationAwareAssignmentConfigBean =
            new WorkerUtilizationAwareAssignmentConfigBean();

    @Delegate(types = WorkerMetricStatsTableConfigBean.WorkerMetricsTableConfigBeanDelegate.class)
    private final WorkerMetricStatsTableConfigBean workerMetricStatsTableConfigBean =
            new WorkerMetricStatsTableConfigBean();

    @Delegate(types = CoordinatorStateTableConfigBean.CoordinatorStateConfigBeanDelegate.class)
    private final CoordinatorStateTableConfigBean coordinatorStateTableConfigBean =
            new CoordinatorStateTableConfigBean();

    private boolean validateSequenceNumberBeforeCheckpointing;

    private long shutdownGraceMillis;
    private Integer timeoutInSeconds;

    private final BuilderDynaBean kinesisCredentialsProvider;

    public void setAwsCredentialsProvider(String providerString) {
        kinesisCredentialsProvider.set("", providerString);
    }

    private final BuilderDynaBean dynamoDBCredentialsProvider;

    public void setAwsCredentialsProviderDynamoDB(String providerString) {
        dynamoDBCredentialsProvider.set("", providerString);
    }

    private final BuilderDynaBean cloudWatchCredentialsProvider;

    public void setAwsCredentialsProviderCloudWatch(String providerString) {
        cloudWatchCredentialsProvider.set("", providerString);
    }

    private final BuilderDynaBean kinesisClient;
    private final BuilderDynaBean dynamoDbClient;
    private final BuilderDynaBean cloudWatchClient;

    private final BeanUtilsBean utilsBean;
    private final ConvertUtilsBean convertUtilsBean;

    public MultiLangDaemonConfiguration(BeanUtilsBean utilsBean, ConvertUtilsBean convertUtilsBean) {
        this.utilsBean = utilsBean;
        this.convertUtilsBean = convertUtilsBean;

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        Date date = new Date(Long.parseLong(value.toString()) * 1000L);
                        return type.cast(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(date));
                    }
                },
                InitialPositionInStreamExtended.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        return type.cast(MetricsLevel.valueOf(value.toString().toUpperCase()));
                    }
                },
                MetricsLevel.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        return type.cast(
                                InitialPositionInStream.valueOf(value.toString().toUpperCase()));
                    }
                },
                InitialPositionInStream.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        return type.cast(CoordinatorConfig.ClientVersionConfig.valueOf(
                                value.toString().toUpperCase()));
                    }
                },
                CoordinatorConfig.ClientVersionConfig.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        return type.cast(BillingMode.valueOf(value.toString().toUpperCase()));
                    }
                },
                BillingMode.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        return type.cast(URI.create(value.toString()));
                    }
                },
                URI.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        return type.cast(RetrievalMode.from(value.toString()));
                    }
                },
                RetrievalMode.class);

        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(final Class<T> type, final Object value) {
                        return type.cast(Region.of(value.toString()));
                    }
                },
                Region.class);

        convertUtilsBean.register(new DurationConverter(), Duration.class);
        convertUtilsBean.register(new TagConverter(), TagCollection.class);

        ArrayConverter arrayConverter = new ArrayConverter(String[].class, new StringConverter());
        arrayConverter.setDelimiter(',');
        convertUtilsBean.register(arrayConverter, String[].class);
        AwsCredentialsProviderPropertyValueDecoder credentialsDecoder =
                new AwsCredentialsProviderPropertyValueDecoder();
        Function<String, ?> converter = credentialsDecoder::decodeValue;

        this.kinesisCredentialsProvider = new BuilderDynaBean(
                AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH);
        this.dynamoDBCredentialsProvider = new BuilderDynaBean(
                AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH);
        this.cloudWatchCredentialsProvider = new BuilderDynaBean(
                AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH);

        this.kinesisClient = new BuilderDynaBean(KinesisAsyncClient.class, convertUtilsBean);
        this.dynamoDbClient = new BuilderDynaBean(DynamoDbAsyncClient.class, convertUtilsBean);
        this.cloudWatchClient = new BuilderDynaBean(CloudWatchAsyncClient.class, convertUtilsBean);
    }

    private void setRegionForClient(String name, BuilderDynaBean client, Region region) {
        try {
            utilsBean.setProperty(client, "region", region);
        } catch (IllegalAccessException | InvocationTargetException e) {
            log.error("Failed to set region on {}", name, e);
            throw new IllegalStateException(e);
        }
    }

    public void setRegionName(Region region) {
        setRegionForClient("kinesisClient", kinesisClient, region);
        setRegionForClient("dynamoDbClient", dynamoDbClient, region);
        setRegionForClient("cloudWatchClient", cloudWatchClient, region);
    }

    private void setEndpointForClient(String name, BuilderDynaBean client, String endpoint) {
        try {
            utilsBean.setProperty(client, "endpointOverride", endpoint);
        } catch (IllegalAccessException | InvocationTargetException e) {
            log.error("Failed to set endpoint on {}", name, e);
            throw new IllegalStateException(e);
        }
    }

    public void setKinesisEndpoint(String endpoint) {
        setEndpointForClient("kinesisClient", kinesisClient, endpoint);
    }

    public void setDynamoDBEndpoint(String endpoint) {
        setEndpointForClient("dynamoDbClient", dynamoDbClient, endpoint);
    }

    private AwsCredentialsProvider resolveCredentials(BuilderDynaBean credsBuilder) {
        if (!credsBuilder.isDirty()) {
            return null;
        }
        return credsBuilder.build(AwsCredentialsProvider.class);
    }

    private void updateCredentials(
            BuilderDynaBean toUpdate, AwsCredentialsProvider primary, AwsCredentialsProvider secondary) {

        if (toUpdate.hasValue("credentialsProvider")) {
            return;
        }

        try {
            if (primary != null) {
                utilsBean.setProperty(toUpdate, "credentialsProvider", primary);
            } else if (secondary != null) {
                utilsBean.setProperty(toUpdate, "credentialsProvider", secondary);
            }
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new RuntimeException("Unable to update credentials", e);
        }
    }

    private void addConfigObjects(Map<Class<?>, Object> configObjects, Object... toAdd) {
        for (Object obj : toAdd) {
            configObjects.put(obj.getClass(), obj);
        }
    }

    private void resolveFields(Map<Class<?>, Object> configObjects, Set<Class<?>> restrictTo, Set<Class<?>> skipIf) {
        ConfigurationSettableUtils.resolveFields(this, configObjects, restrictTo, skipIf);
    }

    private void handleRetrievalConfig(RetrievalConfig retrievalConfig, ConfigsBuilder configsBuilder) {
        retrievalConfig.retrievalSpecificConfig(
                retrievalMode.builder(this).build(configsBuilder.kinesisClient(), this));
    }

    private void handleCoordinatorConfig(CoordinatorConfig coordinatorConfig) {
|
|
||||||
ConfigurationSettableUtils.resolveFields(
|
|
||||||
this.coordinatorStateTableConfigBean, coordinatorConfig.coordinatorStateTableConfig());
|
|
||||||
}
|
|
||||||
|
|
||||||
private void handleLeaseManagementConfig(LeaseManagementConfig leaseManagementConfig) {
|
|
||||||
ConfigurationSettableUtils.resolveFields(
|
|
||||||
this.gracefulLeaseHandoffConfigBean, leaseManagementConfig.gracefulLeaseHandoffConfig());
|
|
||||||
ConfigurationSettableUtils.resolveFields(
|
|
||||||
this.workerUtilizationAwareAssignmentConfigBean,
|
|
||||||
leaseManagementConfig.workerUtilizationAwareAssignmentConfig());
|
|
||||||
ConfigurationSettableUtils.resolveFields(
|
|
||||||
this.workerMetricStatsTableConfigBean,
|
|
||||||
leaseManagementConfig.workerUtilizationAwareAssignmentConfig().workerMetricsTableConfig());
|
|
||||||
}
|
|
||||||
|
|
||||||
private Object adjustKinesisHttpConfiguration(Object builderObj) {
|
|
||||||
if (builderObj instanceof KinesisAsyncClientBuilder) {
|
|
||||||
KinesisAsyncClientBuilder builder = (KinesisAsyncClientBuilder) builderObj;
|
|
||||||
return builder.applyMutation(KinesisClientUtil::adjustKinesisClientBuilder);
|
|
||||||
}
|
|
||||||
|
|
||||||
return builderObj;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Data
|
|
||||||
static class ResolvedConfiguration {
|
|
||||||
final CoordinatorConfig coordinatorConfig;
|
|
||||||
final CheckpointConfig checkpointConfig;
|
|
||||||
final LeaseManagementConfig leaseManagementConfig;
|
|
||||||
final LifecycleConfig lifecycleConfig;
|
|
||||||
final MetricsConfig metricsConfig;
|
|
||||||
final ProcessorConfig processorConfig;
|
|
||||||
final RetrievalConfig retrievalConfig;
|
|
||||||
|
|
||||||
public Scheduler build() {
|
|
||||||
return new Scheduler(
|
|
||||||
checkpointConfig,
|
|
||||||
coordinatorConfig,
|
|
||||||
leaseManagementConfig,
|
|
||||||
lifecycleConfig,
|
|
||||||
metricsConfig,
|
|
||||||
processorConfig,
|
|
||||||
retrievalConfig);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ResolvedConfiguration resolvedConfiguration(ShardRecordProcessorFactory shardRecordProcessorFactory) {
|
|
||||||
AwsCredentialsProvider kinesisCreds = resolveCredentials(kinesisCredentialsProvider);
|
|
||||||
AwsCredentialsProvider dynamoDbCreds = resolveCredentials(dynamoDBCredentialsProvider);
|
|
||||||
AwsCredentialsProvider cloudwatchCreds = resolveCredentials(cloudWatchCredentialsProvider);
|
|
||||||
|
|
||||||
updateCredentials(kinesisClient, kinesisCreds, kinesisCreds);
|
|
||||||
updateCredentials(dynamoDbClient, dynamoDbCreds, kinesisCreds);
|
|
||||||
updateCredentials(cloudWatchClient, cloudwatchCreds, kinesisCreds);
|
|
||||||
|
|
||||||
KinesisAsyncClient kinesisAsyncClient =
|
|
||||||
kinesisClient.build(KinesisAsyncClient.class, this::adjustKinesisHttpConfiguration);
|
|
||||||
DynamoDbAsyncClient dynamoDbAsyncClient = dynamoDbClient.build(DynamoDbAsyncClient.class);
|
|
||||||
CloudWatchAsyncClient cloudWatchAsyncClient = cloudWatchClient.build(CloudWatchAsyncClient.class);
|
|
||||||
|
|
||||||
ConfigsBuilder configsBuilder = new ConfigsBuilder(
|
|
||||||
streamName,
|
|
||||||
applicationName,
|
|
||||||
kinesisAsyncClient,
|
|
||||||
dynamoDbAsyncClient,
|
|
||||||
cloudWatchAsyncClient,
|
|
||||||
workerIdentifier,
|
|
||||||
shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
Map<Class<?>, Object> configObjects = new HashMap<>();
|
|
||||||
addConfigObjects(configObjects, configsBuilder);
|
|
||||||
|
|
||||||
resolveFields(
|
|
||||||
configObjects, Collections.singleton(ConfigsBuilder.class), Collections.singleton(PollingConfig.class));
|
|
||||||
|
|
||||||
CoordinatorConfig coordinatorConfig = configsBuilder.coordinatorConfig();
|
|
||||||
CheckpointConfig checkpointConfig = configsBuilder.checkpointConfig();
|
|
||||||
LeaseManagementConfig leaseManagementConfig = configsBuilder.leaseManagementConfig();
|
|
||||||
LifecycleConfig lifecycleConfig = configsBuilder.lifecycleConfig();
|
|
||||||
MetricsConfig metricsConfig = configsBuilder.metricsConfig();
|
|
||||||
ProcessorConfig processorConfig = configsBuilder.processorConfig();
|
|
||||||
RetrievalConfig retrievalConfig = configsBuilder.retrievalConfig();
|
|
||||||
|
|
||||||
addConfigObjects(
|
|
||||||
configObjects,
|
|
||||||
coordinatorConfig,
|
|
||||||
checkpointConfig,
|
|
||||||
leaseManagementConfig,
|
|
||||||
lifecycleConfig,
|
|
||||||
metricsConfig,
|
|
||||||
processorConfig,
|
|
||||||
retrievalConfig);
|
|
||||||
|
|
||||||
handleCoordinatorConfig(coordinatorConfig);
|
|
||||||
handleLeaseManagementConfig(leaseManagementConfig);
|
|
||||||
handleRetrievalConfig(retrievalConfig, configsBuilder);
|
|
||||||
|
|
||||||
resolveFields(configObjects, null, new HashSet<>(Arrays.asList(ConfigsBuilder.class, PollingConfig.class)));
|
|
||||||
|
|
||||||
return new ResolvedConfiguration(
|
|
||||||
coordinatorConfig,
|
|
||||||
checkpointConfig,
|
|
||||||
leaseManagementConfig,
|
|
||||||
lifecycleConfig,
|
|
||||||
metricsConfig,
|
|
||||||
processorConfig,
|
|
||||||
retrievalConfig);
|
|
||||||
}
|
|
||||||
|
|
||||||
public Scheduler build(ShardRecordProcessorFactory shardRecordProcessorFactory) {
|
|
||||||
return resolvedConfiguration(shardRecordProcessorFactory).build();
|
|
||||||
}
|
|
||||||
}
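For context, the constructor above leans entirely on Apache Commons BeanUtils type conversion. Below is a minimal, standalone sketch of that registration pattern; the enum and property value are invented for illustration and are not part of the KCL code.

import org.apache.commons.beanutils.ConvertUtilsBean;
import org.apache.commons.beanutils.Converter;

public class ConverterRegistrationSketch {
    // Stand-in enum; the configuration registers converters of this shape for MetricsLevel, BillingMode, etc.
    enum SampleLevel { SUMMARY, DETAILED }

    public static void main(String[] args) {
        ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
        convertUtilsBean.register(
                new Converter() {
                    @Override
                    public <T> T convert(Class<T> type, Object value) {
                        // Mirror the pattern above: uppercase the raw property string, then cast.
                        return type.cast(SampleLevel.valueOf(value.toString().toUpperCase()));
                    }
                },
                SampleLevel.class);

        // BeanUtils consults this registry whenever it copies string properties onto a bean.
        SampleLevel level = (SampleLevel) convertUtilsBean.convert("detailed", SampleLevel.class);
        System.out.println(level); // prints DETAILED
    }
}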
@@ -1,73 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import lombok.Getter;
import lombok.Setter;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.retrieval.polling.PollingConfig;

@Getter
@Setter
public class PollingConfigBean implements RetrievalConfigBuilder {

    /**
     * Used by Lombok to auto-generate a delegate at {@link MultiLangDaemonConfiguration#getPollingConfig()}
     */
    interface PollingConfigBeanDelegate {

        Integer getRetryGetRecordsInSeconds();

        void setRetryGetRecordsInSeconds(Integer value);

        Integer getMaxGetRecordsThreadPool();

        void setMaxGetRecordsThreadPool(Integer value);

        long getIdleTimeBetweenReadsInMillis();

        void setIdleTimeBetweenReadsInMillis(long value);

        int getMaxRecords();

        void setMaxRecords(int value);
    }

    @ConfigurationSettable(configurationClass = PollingConfig.class, convertToOptional = true)
    private Integer retryGetRecordsInSeconds;

    @ConfigurationSettable(configurationClass = PollingConfig.class, convertToOptional = true)
    private Integer maxGetRecordsThreadPool;

    @ConfigurationSettable(configurationClass = PollingConfig.class)
    private long idleTimeBetweenReadsInMillis;

    @ConfigurationSettable(configurationClass = PollingConfig.class)
    private int maxRecords;

    public boolean anyPropertiesSet() {
        return retryGetRecordsInSeconds != null
                || maxGetRecordsThreadPool != null
                || idleTimeBetweenReadsInMillis != 0
                || maxRecords != 0;
    }

    @Override
    public PollingConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent) {
        return ConfigurationSettableUtils.resolveFields(
                this, new PollingConfig(parent.getStreamName(), kinesisAsyncClient));
    }
}
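A rough usage sketch of this bean follows; the kinesisAsyncClient and daemonConfiguration objects are assumed to exist, and in practice BeanUtils populates the setters from the .properties file rather than direct calls.

PollingConfigBean pollingBean = new PollingConfigBean();
pollingBean.setMaxRecords(5000);                    // Lombok-generated setter
pollingBean.setIdleTimeBetweenReadsInMillis(1500L);
// anyPropertiesSet() is now true, so RetrievalMode.DEFAULT would choose polling.
PollingConfig pollingConfig = pollingBean.build(kinesisAsyncClient, daemonConfiguration);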
@@ -1,32 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.retrieval.RetrievalSpecificConfig;

public interface RetrievalConfigBuilder {
    /**
     * Creates a retrieval-specific configuration using the supplied parameters and internal class parameters.
     *
     * @param kinesisAsyncClient
     *            the client that will be provided to the RetrievalSpecificConfig constructor
     * @param parent
     *            configuration parameters that this builder can access to configure itself
     * @return a RetrievalSpecificConfig configured according to the customer's configuration.
     */
    public RetrievalSpecificConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent);
}
@@ -1,64 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.util.Arrays;
import java.util.function.Function;
import java.util.stream.Collectors;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.Validate;

@Slf4j
public enum RetrievalMode {
    FANOUT(MultiLangDaemonConfiguration::getFanoutConfig),
    POLLING(MultiLangDaemonConfiguration::getPollingConfig),
    DEFAULT(RetrievalMode::decideForDefault);

    private final Function<MultiLangDaemonConfiguration, RetrievalConfigBuilder> builderFor;

    public RetrievalConfigBuilder builder(MultiLangDaemonConfiguration configuration) {
        return builderFor.apply(configuration);
    }

    RetrievalMode(Function<MultiLangDaemonConfiguration, RetrievalConfigBuilder> builderFor) {
        this.builderFor = builderFor;
    }

    public static RetrievalMode from(String source) {
        Validate.notEmpty(source);
        try {
            return RetrievalMode.valueOf(source.toUpperCase());
        } catch (IllegalArgumentException iae) {
            throw new IllegalArgumentException(
                    "Unknown retrieval type '" + source + "'. Available retrieval types: " + availableRetrievalModes());
        }
    }

    private static String availableRetrievalModes() {
        return "(" + Arrays.stream(RetrievalMode.values()).map(Enum::name).collect(Collectors.joining(", ")) + ")";
    }

    private static RetrievalConfigBuilder decideForDefault(MultiLangDaemonConfiguration configuration) {
        if (configuration.getPollingConfig().anyPropertiesSet()) {
            log.warn("Some polling properties have been set, defaulting to polling. "
                    + "To switch to Fanout either add `RetrievalMode=FANOUT` to your "
                    + "properties or remove any configuration for polling.");
            return configuration.getPollingConfig();
        }
        return configuration.getFanoutConfig();
    }
}
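A short illustration of the parsing behaviour above; the input strings are arbitrary examples.

RetrievalMode mode = RetrievalMode.from("polling");  // case-insensitive, resolves to RetrievalMode.POLLING
// RetrievalMode.from("push") would throw IllegalArgumentException:
// "Unknown retrieval type 'push'. Available retrieval types: (FANOUT, POLLING, DEFAULT)"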
@@ -1,27 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.lang.reflect.Method;

import lombok.Data;

@Data
class TypeTag {
    final Class<?> type;
    final boolean hasConverter;
    final Method builderMethod;
}
@@ -1,82 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import lombok.Getter;
import lombok.Setter;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.kinesis.leases.LeaseManagementConfig.WorkerMetricsTableConfig;
import software.amazon.kinesis.multilang.config.converter.TagConverter.TagCollection;

@Getter
@Setter
public class WorkerMetricStatsTableConfigBean {

    interface WorkerMetricsTableConfigBeanDelegate {
        String getWorkerMetricsTableName();

        void setWorkerMetricsTableName(String value);

        BillingMode getWorkerMetricsBillingMode();

        void setWorkerMetricsBillingMode(BillingMode value);

        long getWorkerMetricsReadCapacity();

        void setWorkerMetricsReadCapacity(long value);

        long getWorkerMetricsWriteCapacity();

        void setWorkerMetricsWriteCapacity(long value);

        Boolean getWorkerMetricsPointInTimeRecoveryEnabled();

        void setWorkerMetricsPointInTimeRecoveryEnabled(Boolean value);

        Boolean getWorkerMetricsDeletionProtectionEnabled();

        void setWorkerMetricsDeletionProtectionEnabled(Boolean value);

        TagCollection getWorkerMetricsTags();

        void setWorkerMetricsTags(TagCollection value);
    }

    @ConfigurationSettable(configurationClass = WorkerMetricsTableConfig.class, methodName = "tableName")
    private String workerMetricsTableName;

    @ConfigurationSettable(configurationClass = WorkerMetricsTableConfig.class, methodName = "billingMode")
    private BillingMode workerMetricsBillingMode;

    @ConfigurationSettable(configurationClass = WorkerMetricsTableConfig.class, methodName = "readCapacity")
    private long workerMetricsReadCapacity;

    @ConfigurationSettable(configurationClass = WorkerMetricsTableConfig.class, methodName = "writeCapacity")
    private long workerMetricsWriteCapacity;

    @ConfigurationSettable(
            configurationClass = WorkerMetricsTableConfig.class,
            methodName = "pointInTimeRecoveryEnabled")
    private Boolean workerMetricsPointInTimeRecoveryEnabled;

    @ConfigurationSettable(
            configurationClass = WorkerMetricsTableConfig.class,
            methodName = "deletionProtectionEnabled")
    private Boolean workerMetricsDeletionProtectionEnabled;

    @ConfigurationSettable(configurationClass = WorkerMetricsTableConfig.class, methodName = "tags")
    private TagCollection workerMetricsTags;
}
@@ -1,106 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.time.Duration;

import lombok.Getter;
import lombok.Setter;
import software.amazon.kinesis.leases.LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig;

@Getter
@Setter
public class WorkerUtilizationAwareAssignmentConfigBean {

    interface WorkerUtilizationAwareAssignmentConfigBeanDelegate {
        long getInMemoryWorkerMetricsCaptureFrequencyMillis();

        void setInMemoryWorkerMetricsCaptureFrequencyMillis(long value);

        long getWorkerMetricsReporterFreqInMillis();

        void setWorkerMetricsReporterFreqInMillis(long value);

        int getNoOfPersistedMetricsPerWorkerMetrics();

        void setNoOfPersistedMetricsPerWorkerMetrics(int value);

        Boolean getDisableWorkerMetrics();

        void setDisableWorkerMetrics(Boolean value);

        double getMaxThroughputPerHostKBps();

        void setMaxThroughputPerHostKBps(double value);

        int getDampeningPercentage();

        void setDampeningPercentage(int value);

        int getReBalanceThresholdPercentage();

        void setReBalanceThresholdPercentage(int value);

        Boolean getAllowThroughputOvershoot();

        void setAllowThroughputOvershoot(Boolean value);

        int getVarianceBalancingFrequency();

        void setVarianceBalancingFrequency(int value);

        double getWorkerMetricsEMAAlpha();

        void setWorkerMetricsEMAAlpha(double value);

        void setStaleWorkerMetricsEntryCleanupDuration(Duration value);

        Duration getStaleWorkerMetricsEntryCleanupDuration();
    }

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private long inMemoryWorkerMetricsCaptureFrequencyMillis;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private long workerMetricsReporterFreqInMillis;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private int noOfPersistedMetricsPerWorkerMetrics;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private Boolean disableWorkerMetrics;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private double maxThroughputPerHostKBps;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private int dampeningPercentage;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private int reBalanceThresholdPercentage;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private Boolean allowThroughputOvershoot;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private int varianceBalancingFrequency;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private double workerMetricsEMAAlpha;

    @ConfigurationSettable(configurationClass = WorkerUtilizationAwareAssignmentConfig.class)
    private Duration staleWorkerMetricsEntryCleanupDuration;
}
@@ -1,52 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config.converter;

import java.time.Duration;

import org.apache.commons.beanutils.Converter;

/**
 * Converter that converts a Duration text representation to a Duration object.
 * Refer to {@code Duration.parse} javadocs for the exact text representation.
 */
public class DurationConverter implements Converter {

    @Override
    public <T> T convert(Class<T> type, Object value) {
        if (value == null) {
            return null;
        }

        if (type != Duration.class) {
            throw new ConversionException("Can only convert to Duration");
        }

        String durationString = value.toString().trim();
        final Duration duration = Duration.parse(durationString);
        if (duration.isNegative()) {
            throw new ConversionException("Negative values are not permitted for duration: " + durationString);
        }

        return type.cast(duration);
    }

    public static class ConversionException extends RuntimeException {
        public ConversionException(String message) {
            super(message);
        }
    }
}
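For illustration, the converter accepts ISO-8601 duration text as understood by java.time.Duration.parse; the values below are examples only.

DurationConverter durationConverter = new DurationConverter();
Duration thirtySeconds = durationConverter.convert(Duration.class, "PT30S");
Duration dayAndTwoHours = durationConverter.convert(Duration.class, "P1DT2H");
// "PT-5M" would throw ConversionException, since negative durations are rejected above.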
@@ -1,67 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config.converter;

import java.util.ArrayList;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.beanutils.Converter;
import software.amazon.awssdk.services.dynamodb.model.Tag;

/**
 * Converter that converts a text value to a collection of Tag objects.
 * The accepted text format is as follows:
 * tagPropertyName = key1=value1,key2=value2,...
 */
@Slf4j
public class TagConverter implements Converter {

    @Override
    public <T> T convert(Class<T> type, Object value) {
        if (value == null) {
            return null;
        }

        if (!type.isAssignableFrom(TagCollection.class)) {
            throw new ConversionException("Can only convert to Collection<Tag>");
        }

        final TagCollection collection = new TagCollection();
        final String tagString = value.toString().trim();
        final String[] keyValuePairs = tagString.split(",");
        for (String keyValuePair : keyValuePairs) {
            final String[] tokens = keyValuePair.trim().split("=");
            if (tokens.length != 2) {
                log.warn("Invalid tag {}, ignoring it", keyValuePair);
                continue;
            }
            final Tag tag =
                    Tag.builder().key(tokens[0].trim()).value(tokens[1].trim()).build();
            log.info("Created tag {}", tag);
            collection.add(tag);
        }

        return type.cast(collection);
    }

    public static class ConversionException extends RuntimeException {
        public ConversionException(String message) {
            super(message);
        }
    }

    public static class TagCollection extends ArrayList<Tag> {}
}
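A brief illustration of the accepted format; the tag keys and values are invented.

TagConverter tagConverter = new TagConverter();
TagCollection tags = tagConverter.convert(TagCollection.class, "team=streaming, env=prod, malformed");
// Yields two Tag entries (team=streaming, env=prod); "malformed" has no '=' and is logged and skipped.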
@@ -1,24 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.messages;

/**
 * Used to indicate to the client that the record processor has lost its lease.
 */
public class LeaseLostMessage extends Message {

    public static final String ACTION = "leaseLost";
}
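For orientation: the daemon serializes messages such as this one as single-line JSON on the child process's STDIN, keyed by the action field; assuming default Jackson serialization of this class, a lease-lost notification would look roughly like {"action":"leaseLost"}.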
@@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d [%thread] %-5level %logger{36} [%mdc{ShardId:-NONE}] - %msg %n</pattern>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="CONSOLE" />
    </root>
</configuration>
@@ -1,211 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import java.io.ByteArrayInputStream;
import java.io.IOException;

import junit.framework.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.when;

@RunWith(MockitoJUnitRunner.class)
public class MultiLangDaemonConfigTest {
    private static final String FILENAME = "multilang.properties";
    private static final String EXE = "TestExe.exe";
    private static final String APPLICATION_NAME = MultiLangDaemonConfigTest.class.getSimpleName();
    private static final String STREAM_NAME = "fakeStream";
    private static final String STREAM_NAME_IN_ARN = "FAKE_STREAM_NAME";
    private static final Region REGION = Region.US_EAST_1;
    private static final String STREAM_ARN = "arn:aws:kinesis:us-east-2:012345678987:stream/" + STREAM_NAME_IN_ARN;

    @Mock
    private ClassLoader classLoader;

    @Mock
    private AwsCredentialsProvider credentialsProvider;

    @Mock
    private AwsCredentials creds;

    private final KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();
    private MultiLangDaemonConfig daemonConfig;

    /**
     * Instantiate a MultiLangDaemonConfig object
     * @param streamName
     * @param streamArn
     * @throws IOException
     */
    public void setup(String streamName, String streamArn) throws IOException {
        String properties = String.format(
                "executableName = %s\n"
                        + "applicationName = %s\n"
                        + "AwsCredentialsProvider = DefaultCredentialsProvider\n"
                        + "processingLanguage = malbolge\n"
                        + "regionName = %s\n",
                EXE, APPLICATION_NAME, "us-east-1");

        if (streamName != null) {
            properties += String.format("streamName = %s\n", streamName);
        }
        if (streamArn != null) {
            properties += String.format("streamArn = %s\n", streamArn);
        }
        classLoader = Mockito.mock(ClassLoader.class);

        Mockito.doReturn(new ByteArrayInputStream(properties.getBytes()))
                .when(classLoader)
                .getResourceAsStream(FILENAME);

        when(credentialsProvider.resolveCredentials()).thenReturn(creds);
        when(creds.accessKeyId()).thenReturn("cool-user");
        daemonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testConstructorFailsBecauseStreamArnIsInvalid() throws Exception {
        setup("", "this_is_not_a_valid_arn");
    }

    @Test(expected = IllegalArgumentException.class)
    public void testConstructorFailsBecauseStreamArnIsInvalid2() throws Exception {
        setup("", "arn:aws:kinesis:us-east-2:ACCOUNT_ID:BadFormatting:stream/" + STREAM_NAME_IN_ARN);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testConstructorFailsBecauseStreamNameAndArnAreEmpty() throws Exception {
        setup("", "");
    }

    @Test(expected = NullPointerException.class)
    public void testConstructorFailsBecauseStreamNameAndArnAreNull() throws Exception {
        setup(null, null);
    }

    @Test(expected = NullPointerException.class)
    public void testConstructorFailsBecauseStreamNameIsNullAndArnIsEmpty() throws Exception {
        setup(null, "");
    }

    @Test(expected = IllegalArgumentException.class)
    public void testConstructorFailsBecauseStreamNameIsEmptyAndArnIsNull() throws Exception {
        setup("", null);
    }

    @Test
    public void testConstructorUsingStreamName() throws IOException {
        setup(STREAM_NAME, null);

        assertConfigurationsMatch(STREAM_NAME, null);
    }

    @Test
    public void testConstructorUsingStreamNameAndStreamArnIsEmpty() throws IOException {
        setup(STREAM_NAME, "");

        assertConfigurationsMatch(STREAM_NAME, "");
    }

    @Test
    public void testConstructorUsingStreamNameAndStreamArnIsWhitespace() throws IOException {
        setup(STREAM_NAME, " ");

        assertConfigurationsMatch(STREAM_NAME, "");
    }

    @Test
    public void testConstructorUsingStreamArn() throws IOException {
        setup(null, STREAM_ARN);

        assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
    }

    @Test
    public void testConstructorUsingStreamNameAsEmptyAndStreamArn() throws IOException {
        setup("", STREAM_ARN);

        assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
    }

    @Test
    public void testConstructorUsingStreamArnOverStreamName() throws IOException {
        setup(STREAM_NAME, STREAM_ARN);

        assertConfigurationsMatch(STREAM_NAME_IN_ARN, STREAM_ARN);
    }

    /**
     * Verify the daemonConfig properties are what we expect them to be.
     *
     * @param expectedStreamName
     * @param expectedStreamArn
     */
    private void assertConfigurationsMatch(String expectedStreamName, String expectedStreamArn) {
        final MultiLangDaemonConfiguration multiLangConfiguration = daemonConfig.getMultiLangDaemonConfiguration();
        assertNotNull(daemonConfig.getExecutorService());
        assertNotNull(multiLangConfiguration);
        assertNotNull(daemonConfig.getRecordProcessorFactory());

        assertEquals(EXE, daemonConfig.getRecordProcessorFactory().getCommandArray()[0]);
        assertEquals(APPLICATION_NAME, multiLangConfiguration.getApplicationName());
        assertEquals(expectedStreamName, multiLangConfiguration.getStreamName());
        assertEquals(REGION, multiLangConfiguration.getDynamoDbClient().get("region"));
        assertEquals(REGION, multiLangConfiguration.getCloudWatchClient().get("region"));
        assertEquals(REGION, multiLangConfiguration.getKinesisClient().get("region"));
        assertEquals(expectedStreamArn, multiLangConfiguration.getStreamArn());
    }

    @Test
    public void testPropertyValidation() {
        String propertiesNoExecutableName = "applicationName = testApp \n" + "streamName = fakeStream \n"
                + "AwsCredentialsProvider = DefaultCredentialsProvider\n" + "processingLanguage = malbolge";
        ClassLoader classLoader = Mockito.mock(ClassLoader.class);

        Mockito.doReturn(new ByteArrayInputStream(propertiesNoExecutableName.getBytes()))
                .when(classLoader)
                .getResourceAsStream(FILENAME);

        try {
            new MultiLangDaemonConfig(FILENAME, classLoader, configurator);
            Assert.fail("Construction of the config should have failed due to property validation failing.");
        } catch (IllegalArgumentException e) {
            // Good
        } catch (IOException e) {
            Assert.fail();
        }
    }

    /**
     * Test the loading of a "real" properties file. This test should catch
     * any issues which might arise if there is a discrepancy between reality
     * and mocking.
     */
    @Test
    public void testActualPropertiesFile() throws Exception {
        new MultiLangDaemonConfig(FILENAME);
    }
}
@@ -1,281 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.joran.JoranConfigurator;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.slf4j.LoggerFactory;
import software.amazon.kinesis.coordinator.Scheduler;
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

@RunWith(MockitoJUnitRunner.class)
public class MultiLangDaemonTest {
    @Mock
    private Scheduler scheduler;

    @Mock
    private MultiLangDaemonConfig config;

    @Mock
    private ExecutorService executorService;

    @Mock
    private Future<Integer> futureInteger;

    @Mock
    private MultiLangDaemonConfiguration multiLangDaemonConfiguration;

    @Mock
    private Runtime runtime;

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    @Rule
    public final TemporaryFolder temporaryFolder = new TemporaryFolder();

    private MultiLangDaemon daemon;

    @Before
    public void setup() {
        daemon = new MultiLangDaemon() {
            @Override
            Scheduler buildScheduler(final MultiLangDaemonConfig configuration) {
                return scheduler;
            }
        };
    }

    @Test
    public void testSuccessfulNoOptionsJCommanderBuild() {
        String testPropertiesFile = "/test/properties/file";
        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();
        daemon.buildJCommanderAndParseArgs(arguments, new String[] {testPropertiesFile});

        assertThat(arguments.propertiesFile, nullValue());
        assertThat(arguments.logConfiguration, nullValue());
        assertThat(arguments.parameters.size(), equalTo(1));
        assertThat(arguments.parameters.get(0), equalTo(testPropertiesFile));
    }

    @Test
    public void testSuccessfulOptionsJCommanderBuild() {
        String propertiesOption = "/test/properties/file/option";
        String propertiesFileArgs = "/test/properties/args";
        String[] args = new String[] {"-p", propertiesOption, propertiesFileArgs};
        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();
        daemon.buildJCommanderAndParseArgs(arguments, args);

        assertThat(arguments.propertiesFile, equalTo(propertiesOption));
        assertThat(arguments.logConfiguration, nullValue());
        assertThat(arguments.parameters.size(), equalTo(1));
        assertThat(arguments.parameters.get(0), equalTo(propertiesFileArgs));
    }

    @Test
    public void testEmptyArgsJCommanderBuild() {
        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();
        String[] args = new String[] {};
        daemon.buildJCommanderAndParseArgs(arguments, args);

        assertThat(arguments.propertiesFile, nullValue());
        assertThat(arguments.logConfiguration, nullValue());
        assertThat(arguments.parameters, empty());
    }

    @Test
    public void testSuccessfulLoggingConfiguration() {
        LoggerContext loggerContext = spy((LoggerContext) LoggerFactory.getILoggerFactory());
        JoranConfigurator configurator = spy(new JoranConfigurator());

        String logConfiguration =
                this.getClass().getClassLoader().getResource("logback.xml").getPath();
        daemon.configureLogging(logConfiguration, loggerContext, configurator);

        verify(loggerContext).reset();
        verify(configurator).setContext(eq(loggerContext));
    }

    @Test
    public void testUnsuccessfulLoggingConfiguration() {
        LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
        JoranConfigurator configurator = new JoranConfigurator();

        expectedException.expect(RuntimeException.class);
        expectedException.expectMessage(containsString("Error while loading log configuration:"));

        String logConfiguration = "blahblahblah";

        daemon.configureLogging(logConfiguration, loggerContext, configurator);
    }

    @Test
    public void testNoPropertiesFileArgumentOrOption() {
        expectedException.expect(RuntimeException.class);
        expectedException.expectMessage(equalTo("Properties file missing, please provide a properties file"));

        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();

        daemon.validateAndGetPropertiesFileName(arguments);
    }

    @Test
    public void testSuccessfulPropertiesArgument() {
        String expectedPropertiesFile = "/test/properties/file";
        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();
        arguments.parameters = Collections.singletonList(expectedPropertiesFile);

        String propertiesFile = daemon.validateAndGetPropertiesFileName(arguments);

        assertThat(propertiesFile, equalTo(expectedPropertiesFile));
    }

    @Test
    public void testPropertiesOptionsOverrideArgument() {
        String propertiesArgument = "/test/properties/argument";
        String propertiesOptions = "/test/properties/options";

        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();
        arguments.parameters = Collections.singletonList(propertiesArgument);
        arguments.propertiesFile = propertiesOptions;

        String propertiesFile = daemon.validateAndGetPropertiesFileName(arguments);

        assertThat(propertiesFile, equalTo(propertiesOptions));
    }

    @Test
    public void testExtraArgumentsFailure() {
        expectedException.expect(RuntimeException.class);
        expectedException.expectMessage(containsString("Expected a single argument, but found multiple arguments."));

        MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments();
        arguments.parameters = Arrays.asList("parameter1", "parameter2");

        daemon.validateAndGetPropertiesFileName(arguments);
    }

    @Test
    public void testBuildMultiLangConfigMissingPropertiesFile() {
        expectedException.expect(RuntimeException.class);
        expectedException.expectMessage(containsString("Error while reading properties file:"));

        daemon.buildMultiLangDaemonConfig("blahblahblah");
    }

    @Test
    public void testBuildMultiLangConfigWithIncorrectInformation() throws IOException {
        File propertiesFile = temporaryFolder.newFile("temp.properties");

        expectedException.expect(RuntimeException.class);
        expectedException.expectMessage(containsString("Must provide an executable name in the properties file"));

        daemon.buildMultiLangDaemonConfig(propertiesFile.getAbsolutePath());
    }

    @Test
    public void testSuccessfulSubmitRunnerAndWait() throws Exception {
        int expectedExitCode = 0;

        MultiLangDaemon.MultiLangRunner runner = new MultiLangDaemon.MultiLangRunner(scheduler);
        when(config.getExecutorService()).thenReturn(executorService);
        when(executorService.submit(eq(runner))).thenReturn(futureInteger);
        when(futureInteger.get()).thenReturn(expectedExitCode);

        int exitCode = daemon.submitRunnerAndWait(config, runner);

        assertThat(exitCode, equalTo(expectedExitCode));
    }

    @Test
    public void testErrorSubmitRunnerAndWait() throws Exception {
        int expectedExitCode = 1;

        MultiLangDaemon.MultiLangRunner runner = new MultiLangDaemon.MultiLangRunner(scheduler);
        when(config.getExecutorService()).thenReturn(executorService);
        when(executorService.submit(eq(runner))).thenReturn(futureInteger);
        when(futureInteger.get()).thenThrow(ExecutionException.class);

        int exitCode = daemon.submitRunnerAndWait(config, runner);

        assertThat(exitCode, equalTo(expectedExitCode));
    }

    @Test
    public void testSetupShutdownHook() {
        when(config.getMultiLangDaemonConfiguration()).thenReturn(multiLangDaemonConfiguration);
        when(multiLangDaemonConfiguration.getShutdownGraceMillis()).thenReturn(1000L);
        doNothing().when(runtime).addShutdownHook(anyObject());

        MultiLangDaemon.MultiLangRunner runner = new MultiLangDaemon.MultiLangRunner(scheduler);
        daemon.setupShutdownHook(runtime, runner, config);

        verify(multiLangDaemonConfiguration).getShutdownGraceMillis();
        verify(runtime).addShutdownHook(anyObject());
    }

    @Test
    public void testSuccessfulRunner() throws Exception {
        MultiLangDaemon.MultiLangRunner runner = new MultiLangDaemon.MultiLangRunner(scheduler);
        doNothing().when(scheduler).run();

        int exit = runner.call();

        assertThat(exit, equalTo(0));

        verify(scheduler).run();
    }

    @Test
    public void testUnsuccessfulRunner() throws Exception {
        MultiLangDaemon.MultiLangRunner runner = new MultiLangDaemon.MultiLangRunner(scheduler);
        doThrow(Exception.class).when(scheduler).run();

        int exit = runner.call();

        assertThat(exit, equalTo(1));

        verify(scheduler).run();
    }
}
@@ -1,110 +0,0 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.regions.Region;

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT;
import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT_REGION;
import static software.amazon.kinesis.multilang.NestedPropertyKey.EXTERNAL_ID;
import static software.amazon.kinesis.multilang.NestedPropertyKey.parse;

@RunWith(MockitoJUnitRunner.class)
public class NestedPropertyKeyTest {

    @Mock
    private NestedPropertyProcessor mockProcessor;

    @Test
    public void testExternalId() {
        final String expectedId = "eid";

        parse(mockProcessor, createKey(EXTERNAL_ID, expectedId));
        verify(mockProcessor).acceptExternalId(expectedId);
    }

    @Test
    public void testEndpoint() {
        final String expectedEndpoint = "https://sts.us-east-1.amazonaws.com";
        final String expectedRegion = "us-east-1";
        final String param = createKey(ENDPOINT, expectedEndpoint + "^" + expectedRegion);

        parse(mockProcessor, param);
        verify(mockProcessor).acceptEndpoint(expectedEndpoint, expectedRegion);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidEndpoint() {
        parse(mockProcessor, createKey(ENDPOINT, "value-sans-caret-delimiter"));
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidEndpointDoubleCaret() {
        parse(mockProcessor, createKey(ENDPOINT, "https://sts.us-east-1.amazonaws.com^us-east-1^borkbork"));
    }

    @Test
    public void testEndpointRegion() {
        final Region expectedRegion = Region.US_GOV_WEST_1;

        parse(mockProcessor, createKey(ENDPOINT_REGION, expectedRegion.id()));
        verify(mockProcessor).acceptEndpointRegion(expectedRegion);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidEndpointRegion() {
        parse(mockProcessor, createKey(ENDPOINT_REGION, "snuffleupagus"));
    }

    /**
     * Test that the literal nested key (i.e., {@code key=} in {@code some_val|key=nested_val})
     * does not change. Any change to an existing literal key is not backwards-compatible.
     */
    @Test
    public void testKeysExplicitly() {
        // Adding a new enum will deliberately cause this assert to fail, and
        // therefore raise awareness for this explicit test. Add-and-remove may
        // keep the number unchanged yet will also break (by removing an enum).
        assertEquals(3, NestedPropertyKey.values().length);

        assertEquals("endpoint", ENDPOINT.getNestedKey());
        assertEquals("endpointRegion", ENDPOINT_REGION.getNestedKey());
        assertEquals("externalId", EXTERNAL_ID.getNestedKey());
    }

    @Test
    public void testNonmatchingParameters() {
        final String[] params = new String[] {
            null,
            "",
            "hello world", // no nested key
            "foo=bar", // nested key, but is not a recognized key
            createKey(EXTERNAL_ID, "eid") + "=extra", // valid key made invalid by second '='
        };
        parse(mockProcessor, params);
        verifyZeroInteractions(mockProcessor);
    }

    private static String createKey(final NestedPropertyKey key, final String value) {
|
|
||||||
return key.getNestedKey() + "=" + value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,70 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2023 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package software.amazon.kinesis.multilang.auth;
|
|
||||||
|
|
||||||
import java.util.Arrays;
|
|
||||||
|
|
||||||
import org.junit.Test;
|
|
||||||
|
|
||||||
import static org.junit.Assert.assertEquals;
|
|
||||||
|
|
||||||
public class KclStsAssumeRoleCredentialsProviderTest {
|
|
||||||
|
|
||||||
private static final String ARN = "arn";
|
|
||||||
private static final String SESSION_NAME = "sessionName";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test that the constructor doesn't throw an out-of-bounds exception if
|
|
||||||
* there are no parameters beyond the required ARN and session name.
|
|
||||||
*/
|
|
||||||
@Test
|
|
||||||
public void testConstructorWithoutOptionalParams() {
|
|
||||||
new KclStsAssumeRoleCredentialsProvider(new String[] {ARN, SESSION_NAME, "endpointRegion=us-east-1"});
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testAcceptEndpoint() {
|
|
||||||
// discovered exception during e2e testing; therefore, this test is
|
|
||||||
// to simply verify the constructed STS client doesn't go *boom*
|
|
||||||
final KclStsAssumeRoleCredentialsProvider provider =
|
|
||||||
new KclStsAssumeRoleCredentialsProvider(ARN, SESSION_NAME, "endpointRegion=us-east-1");
|
|
||||||
provider.acceptEndpoint("endpoint", "us-east-1");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testVarArgs() {
|
|
||||||
for (final String[] varargs : Arrays.asList(
|
|
||||||
new String[] {ARN, SESSION_NAME, "externalId=eid", "foo", "endpointRegion=us-east-1"},
|
|
||||||
new String[] {ARN, SESSION_NAME, "foo", "externalId=eid", "endpointRegion=us-east-1"})) {
|
|
||||||
final VarArgsSpy provider = new VarArgsSpy(varargs);
|
|
||||||
assertEquals("eid", provider.externalId);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static class VarArgsSpy extends KclStsAssumeRoleCredentialsProvider {
|
|
||||||
|
|
||||||
private String externalId;
|
|
||||||
|
|
||||||
public VarArgsSpy(String[] args) {
|
|
||||||
super(args);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void acceptExternalId(final String externalId) {
|
|
||||||
this.externalId = externalId;
|
|
||||||
super.acceptExternalId(externalId);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,237 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package software.amazon.kinesis.multilang.config;
|
|
||||||
|
|
||||||
import java.util.Arrays;
|
|
||||||
|
|
||||||
import lombok.ToString;
|
|
||||||
import org.hamcrest.Description;
|
|
||||||
import org.hamcrest.Matcher;
|
|
||||||
import org.hamcrest.TypeSafeDiagnosingMatcher;
|
|
||||||
import org.junit.Test;
|
|
||||||
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
|
|
||||||
import software.amazon.awssdk.auth.credentials.AwsCredentials;
|
|
||||||
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
|
|
||||||
import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;
|
|
||||||
import software.amazon.awssdk.services.sts.auth.StsAssumeRoleCredentialsProvider;
|
|
||||||
import software.amazon.kinesis.multilang.auth.KclStsAssumeRoleCredentialsProvider;
|
|
||||||
|
|
||||||
import static org.hamcrest.CoreMatchers.equalTo;
|
|
||||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
|
||||||
import static org.junit.Assert.assertEquals;
|
|
||||||
import static org.junit.Assert.assertNotNull;
|
|
||||||
import static org.junit.Assert.assertThat;
|
|
||||||
|
|
||||||
public class AwsCredentialsProviderPropertyValueDecoderTest {
|
|
||||||
|
|
||||||
private static final String TEST_ACCESS_KEY_ID = "123";
|
|
||||||
private static final String TEST_SECRET_KEY = "456";
|
|
||||||
|
|
||||||
private final String credentialName1 = AlwaysSucceedCredentialsProvider.class.getName();
|
|
||||||
private final String credentialName2 = ConstructorCredentialsProvider.class.getName();
|
|
||||||
private final String createCredentialClass = CreateProvider.class.getName();
|
|
||||||
private final AwsCredentialsProviderPropertyValueDecoder decoder = new AwsCredentialsProviderPropertyValueDecoder();
|
|
||||||
|
|
||||||
@ToString
|
|
||||||
private static class AwsCredentialsMatcher extends TypeSafeDiagnosingMatcher<AwsCredentialsProvider> {
|
|
||||||
|
|
||||||
private final Matcher<String> akidMatcher;
|
|
||||||
private final Matcher<String> secretMatcher;
|
|
||||||
private final Matcher<Class<?>> classMatcher;
|
|
||||||
|
|
||||||
public AwsCredentialsMatcher(String akid, String secret) {
|
|
||||||
this.akidMatcher = equalTo(akid);
|
|
||||||
this.secretMatcher = equalTo(secret);
|
|
||||||
this.classMatcher = instanceOf(AwsCredentialsProviderChain.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
protected boolean matchesSafely(AwsCredentialsProvider item, Description mismatchDescription) {
|
|
||||||
AwsCredentials actual = item.resolveCredentials();
|
|
||||||
boolean matched = true;
|
|
||||||
|
|
||||||
if (!classMatcher.matches(item)) {
|
|
||||||
classMatcher.describeMismatch(item, mismatchDescription);
|
|
||||||
matched = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!akidMatcher.matches(actual.accessKeyId())) {
|
|
||||||
akidMatcher.describeMismatch(actual.accessKeyId(), mismatchDescription);
|
|
||||||
matched = false;
|
|
||||||
}
|
|
||||||
if (!secretMatcher.matches(actual.secretAccessKey())) {
|
|
||||||
secretMatcher.describeMismatch(actual.secretAccessKey(), mismatchDescription);
|
|
||||||
matched = false;
|
|
||||||
}
|
|
||||||
return matched;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void describeTo(Description description) {
|
|
||||||
description
|
|
||||||
.appendText("An AwsCredentialsProvider that provides an AwsCredential matching: ")
|
|
||||||
.appendList("(", ", ", ")", Arrays.asList(classMatcher, akidMatcher, secretMatcher));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static AwsCredentialsMatcher hasCredentials(String akid, String secret) {
|
|
||||||
return new AwsCredentialsMatcher(akid, secret);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSingleProvider() {
|
|
||||||
AwsCredentialsProvider provider = decoder.decodeValue(credentialName1);
|
|
||||||
assertThat(provider, hasCredentials(TEST_ACCESS_KEY_ID, TEST_SECRET_KEY));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testTwoProviders() {
|
|
||||||
AwsCredentialsProvider provider = decoder.decodeValue(credentialName1 + "," + credentialName1);
|
|
||||||
assertThat(provider, hasCredentials(TEST_ACCESS_KEY_ID, TEST_SECRET_KEY));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testProfileProviderWithOneArg() {
|
|
||||||
AwsCredentialsProvider provider = decoder.decodeValue(credentialName2 + "|arg");
|
|
||||||
assertThat(provider, hasCredentials("arg", "blank"));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testProfileProviderWithTwoArgs() {
|
|
||||||
AwsCredentialsProvider provider = decoder.decodeValue(credentialName2 + "|arg1|arg2");
|
|
||||||
assertThat(provider, hasCredentials("arg1", "arg2"));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test that providers in the multi-lang auth package can be resolved and instantiated.
|
|
||||||
*/
|
|
||||||
@Test
|
|
||||||
public void testKclAuthProvider() {
|
|
||||||
for (final String className : Arrays.asList(
|
|
||||||
KclStsAssumeRoleCredentialsProvider.class.getName(), // fully-qualified name
|
|
||||||
KclStsAssumeRoleCredentialsProvider.class.getSimpleName(), // name-only; needs prefix
|
|
||||||
StsAssumeRoleCredentialsProvider.class.getName(), // user passes full sts package path
|
|
||||||
StsAssumeRoleCredentialsProvider.class.getSimpleName())) {
|
|
||||||
final AwsCredentialsProvider provider =
|
|
||||||
decoder.decodeValue(className + "|arn|sessionName|endpointRegion=us-east-1");
|
|
||||||
assertNotNull(className, provider);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test that OneArgCreateProvider in the SDK v2 can process a create() method
|
|
||||||
*/
|
|
||||||
@Test
|
|
||||||
public void testEmptyCreateProvider() {
|
|
||||||
AwsCredentialsProvider provider = decoder.decodeValue(createCredentialClass);
|
|
||||||
assertThat(provider, hasCredentials(TEST_ACCESS_KEY_ID, TEST_SECRET_KEY));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test that OneArgCreateProvider in the SDK v2 can process a create(arg1) method
|
|
||||||
*/
|
|
||||||
@Test
|
|
||||||
public void testOneArgCreateProvider() {
|
|
||||||
AwsCredentialsProvider provider = decoder.decodeValue(createCredentialClass + "|testCreateProperty");
|
|
||||||
assertThat(provider, hasCredentials("testCreateProperty", TEST_SECRET_KEY));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Test that a provider can be instantiated by its varargs constructor.
|
|
||||||
*/
|
|
||||||
@Test
|
|
||||||
public void testVarArgAuthProvider() {
|
|
||||||
final String[] args = new String[] {"arg1", "arg2", "arg3"};
|
|
||||||
final String className = VarArgCredentialsProvider.class.getName();
|
|
||||||
final String encodedValue = className + "|" + String.join("|", args);
|
|
||||||
|
|
||||||
final AwsCredentialsProvider provider = decoder.decodeValue(encodedValue);
|
|
||||||
assertEquals(Arrays.toString(args), provider.resolveCredentials().accessKeyId());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This credentials provider will always succeed
|
|
||||||
*/
|
|
||||||
public static class AlwaysSucceedCredentialsProvider implements AwsCredentialsProvider {
|
|
||||||
@Override
|
|
||||||
public AwsCredentials resolveCredentials() {
|
|
||||||
return AwsBasicCredentials.create(TEST_ACCESS_KEY_ID, TEST_SECRET_KEY);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This credentials provider needs a constructor call to instantiate it
|
|
||||||
*/
|
|
||||||
public static class ConstructorCredentialsProvider implements AwsCredentialsProvider {
|
|
||||||
|
|
||||||
private String arg1;
|
|
||||||
private String arg2;
|
|
||||||
|
|
||||||
@SuppressWarnings("unused")
|
|
||||||
public ConstructorCredentialsProvider(String arg1) {
|
|
||||||
this(arg1, "blank");
|
|
||||||
}
|
|
||||||
|
|
||||||
public ConstructorCredentialsProvider(String arg1, String arg2) {
|
|
||||||
this.arg1 = arg1;
|
|
||||||
this.arg2 = arg2;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public AwsCredentials resolveCredentials() {
|
|
||||||
return AwsBasicCredentials.create(arg1, arg2);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static class VarArgCredentialsProvider implements AwsCredentialsProvider {
|
|
||||||
|
|
||||||
private final String[] args;
|
|
||||||
|
|
||||||
public VarArgCredentialsProvider(final String[] args) {
|
|
||||||
this.args = args;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public AwsCredentials resolveCredentials() {
|
|
||||||
// KISS solution to surface the constructor args
|
|
||||||
final String flattenedArgs = Arrays.toString(args);
|
|
||||||
return AwsBasicCredentials.create(flattenedArgs, flattenedArgs);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Credentials provider to test AWS SDK v2 create() methods for providers like ProfileCredentialsProvider
|
|
||||||
*/
|
|
||||||
public static class CreateProvider implements AwsCredentialsProvider {
|
|
||||||
private String accessKeyId;
|
|
||||||
|
|
||||||
private CreateProvider(String accessKeyId) {
|
|
||||||
this.accessKeyId = accessKeyId;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static CreateProvider create() {
|
|
||||||
return new CreateProvider(TEST_ACCESS_KEY_ID);
|
|
||||||
}
|
|
||||||
|
|
||||||
public static CreateProvider create(String accessKeyId) {
|
|
||||||
return new CreateProvider(accessKeyId);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public AwsCredentials resolveCredentials() {
|
|
||||||
return AwsBasicCredentials.create(accessKeyId, TEST_SECRET_KEY);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,900 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package software.amazon.kinesis.multilang.config;
|
|
||||||
|
|
||||||
import java.util.function.Consumer;
|
|
||||||
import java.util.function.Supplier;
|
|
||||||
|
|
||||||
import lombok.Builder;
|
|
||||||
import lombok.EqualsAndHashCode;
|
|
||||||
import lombok.RequiredArgsConstructor;
|
|
||||||
import lombok.ToString;
|
|
||||||
import lombok.experimental.Accessors;
|
|
||||||
import org.apache.commons.beanutils.BeanUtilsBean;
|
|
||||||
import org.apache.commons.beanutils.ConvertUtilsBean;
|
|
||||||
import org.junit.Before;
|
|
||||||
import org.junit.Rule;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.junit.rules.ExpectedException;
|
|
||||||
|
|
||||||
import static org.hamcrest.CoreMatchers.containsString;
|
|
||||||
import static org.hamcrest.CoreMatchers.equalTo;
|
|
||||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
|
||||||
import static org.hamcrest.CoreMatchers.nullValue;
|
|
||||||
import static org.junit.Assert.assertThat;
|
|
||||||
import static org.junit.Assert.fail;
|
|
||||||
|
|
||||||
public class BuilderDynaBeanTest {
|
|
||||||
|
|
||||||
private static boolean isBad = true;
|
|
||||||
private ConvertUtilsBean convertUtilsBean;
|
|
||||||
private BeanUtilsBean utilsBean;
|
|
||||||
|
|
||||||
@Rule
|
|
||||||
public final ExpectedException thrown = ExpectedException.none();
|
|
||||||
|
|
||||||
@Before
|
|
||||||
public void setup() {
|
|
||||||
convertUtilsBean = new ConvertUtilsBean();
|
|
||||||
utilsBean = new BeanUtilsBean(convertUtilsBean);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleCreateAllParameters() throws Exception {
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleCreate.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", "first");
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1]", "last");
|
|
||||||
|
|
||||||
TestSimpleCreate expected = TestSimpleCreate.create("first", "last");
|
|
||||||
TestSimpleCreate actual = builderDynaBean.build(TestSimpleCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleCreateToManyParameters() throws Exception {
|
|
||||||
thrown.expect(IllegalArgumentException.class);
|
|
||||||
thrown.expectMessage(containsString("exceeds the maximum"));
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleCreate.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", "first");
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1]", "last");
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[2]", "age");
|
|
||||||
|
|
||||||
TestSimpleCreate expected = TestSimpleCreate.create("first", "last");
|
|
||||||
TestSimpleCreate actual = builderDynaBean.build(TestSimpleCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleCreateMissingParameter() throws Exception {
|
|
||||||
TestSimpleCreate expected = TestSimpleCreate.create(null, "last");
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleCreate.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1]", expected.lastName);
|
|
||||||
|
|
||||||
TestSimpleCreate actual = builderDynaBean.build(TestSimpleCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleCreateNoParameters() throws Exception {
|
|
||||||
TestSimpleCreate expected = TestSimpleCreate.create(null, null);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleCreate.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1]", expected.lastName);
|
|
||||||
|
|
||||||
TestSimpleCreate actual = builderDynaBean.build(TestSimpleCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateAllParameters() throws Exception {
|
|
||||||
TestComplexCreate expected = TestComplexCreate.create(
|
|
||||||
"real", TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build());
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.realName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].stringL1", expected.test1.stringL1);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].longVal", expected.test1.longVal);
|
|
||||||
|
|
||||||
TestComplexCreate actual = builderDynaBean.build(TestComplexCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateSimpleParameterOnly() throws Exception {
|
|
||||||
TestComplexCreate expected = TestComplexCreate.create("real", null);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.realName);
|
|
||||||
|
|
||||||
TestComplexCreate actual = builderDynaBean.build(TestComplexCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateComplexParameterOnly() throws Exception {
|
|
||||||
TestComplexCreate expected = TestComplexCreate.create(
|
|
||||||
null, TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build());
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].stringL1", expected.test1.stringL1);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].longVal", expected.test1.longVal);
|
|
||||||
|
|
||||||
TestComplexCreate actual = builderDynaBean.build(TestComplexCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateNoParameters() throws Exception {
|
|
||||||
TestComplexCreate expected = TestComplexCreate.create(null, null);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean);
|
|
||||||
|
|
||||||
TestComplexCreate actual = builderDynaBean.build(TestComplexCreate.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleBuilderAllParameters() throws Exception {
|
|
||||||
TestSimpleBuilder expected =
|
|
||||||
TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "longVal", expected.longVal);
|
|
||||||
|
|
||||||
TestSimpleBuilder actual = builderDynaBean.build(TestSimpleBuilder.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleBuilderMissingStringL1() throws Exception {
|
|
||||||
TestSimpleBuilder expected = TestSimpleBuilder.builder().longVal(10L).build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "longVal", expected.longVal);
|
|
||||||
|
|
||||||
TestSimpleBuilder actual = builderDynaBean.build(TestSimpleBuilder.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleBuilderMissingLongVal() throws Exception {
|
|
||||||
TestSimpleBuilder expected = TestSimpleBuilder.builder().stringL1("l1").build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1);
|
|
||||||
|
|
||||||
TestSimpleBuilder actual = builderDynaBean.build(TestSimpleBuilder.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleBuilderInvalidProperty() throws Exception {
|
|
||||||
thrown.expect(IllegalArgumentException.class);
|
|
||||||
thrown.expectMessage("Unknown property: invalidProperty");
|
|
||||||
|
|
||||||
TestSimpleBuilder expected = TestSimpleBuilder.builder().build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "invalidProperty", "invalid");
|
|
||||||
|
|
||||||
TestSimpleBuilder actual = builderDynaBean.build(TestSimpleBuilder.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateSimpleBuilderVariantAllParameters() throws Exception {
|
|
||||||
TestSimpleBuilder variant =
|
|
||||||
TestSimpleBuilder.builder().longVal(10L).stringL1("variant").build();
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("simple-builder", variant);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, "[1].class", expected.variant.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].longVal", variant.longVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].stringL1", variant.stringL1);
|
|
||||||
|
|
||||||
TestComplexCreateVariance actual = builderDynaBean.build(TestComplexCreateVariance.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateVariantBuilderAllParameters() throws Exception {
|
|
||||||
TestVariantBuilder variant = TestVariantBuilder.builder()
|
|
||||||
.variantBuilderName("variant-build")
|
|
||||||
.intClass(20)
|
|
||||||
.testEnum(TestEnum.Blue)
|
|
||||||
.build();
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant", variant);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].intClass", variant.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].testEnum", variant.testEnum);
|
|
||||||
|
|
||||||
TestComplexCreateVariance actual = builderDynaBean.build(TestComplexCreateVariance.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateVariantCreateAllParameters() throws Exception {
|
|
||||||
TestVariantCreate variant = TestVariantCreate.create("variant-create", 100L, "varied");
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("create-variant", variant);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].[0]", variant.variantCreateName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].[1]", variant.longClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].[2]", variant.varyString);
|
|
||||||
|
|
||||||
TestComplexCreateVariance actual = builderDynaBean.build(TestComplexCreateVariance.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateVariantBuilderAllParametersPrefixWithJoiner() throws Exception {
|
|
||||||
TestVariantBuilder variant = TestVariantBuilder.builder()
|
|
||||||
.variantBuilderName("variant-build")
|
|
||||||
.intClass(20)
|
|
||||||
.testEnum(TestEnum.Blue)
|
|
||||||
.build();
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", variant);
|
|
||||||
|
|
||||||
String prefix = variant.getClass().getEnclosingClass().getName() + "$";
|
|
||||||
BuilderDynaBean builderDynaBean =
|
|
||||||
new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, prefix);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getSimpleName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].intClass", variant.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].testEnum", variant.testEnum);
|
|
||||||
|
|
||||||
TestComplexCreateVariance actual = builderDynaBean.build(TestComplexCreateVariance.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateVariantBuilderAllParametersPrefixWithOutJoiner() throws Exception {
|
|
||||||
TestVariantBuilder variant = TestVariantBuilder.builder()
|
|
||||||
.variantBuilderName("variant-build")
|
|
||||||
.intClass(20)
|
|
||||||
.testEnum(TestEnum.Blue)
|
|
||||||
.build();
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", variant);
|
|
||||||
|
|
||||||
String prefix = variant.getClass().getEnclosingClass().getName();
|
|
||||||
BuilderDynaBean builderDynaBean =
|
|
||||||
new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, prefix);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getSimpleName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].intClass", variant.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].testEnum", variant.testEnum);
|
|
||||||
|
|
||||||
TestComplexCreateVariance actual = builderDynaBean.build(TestComplexCreateVariance.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateVariantInvalidVariantClass() throws Exception {
|
|
||||||
String invalidClass = "invalid-class";
|
|
||||||
thrown.expect(IllegalArgumentException.class);
|
|
||||||
thrown.expectMessage(containsString("Unable to load class"));
|
|
||||||
thrown.expectMessage(containsString(invalidClass));
|
|
||||||
thrown.expectMessage(containsString("Attempted"));
|
|
||||||
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", null);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].class", invalidClass);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexCreateVariantBadLoadClass() throws Exception {
|
|
||||||
thrown.expect(ExceptionInInitializerError.class);
|
|
||||||
thrown.expectCause(instanceOf(BadClassException.class));
|
|
||||||
TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", null);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[1].class", getClass().getName() + "$BadClass");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexRootAllParameters() throws Exception {
|
|
||||||
TestSimpleBuilder simpleBuilder =
|
|
||||||
TestSimpleBuilder.builder().stringL1("simple-l1").longVal(20L).build();
|
|
||||||
TestRootClass expected = TestRootClass.builder()
|
|
||||||
.intVal(10)
|
|
||||||
.stringVal("root")
|
|
||||||
.testEnum(TestEnum.Red)
|
|
||||||
.testComplexCreate(TestComplexCreate.create(
|
|
||||||
"real",
|
|
||||||
TestSimpleBuilder.builder()
|
|
||||||
.stringL1("complex-l1")
|
|
||||||
.longVal(10L)
|
|
||||||
.build()))
|
|
||||||
.testSimpleBuilder(simpleBuilder)
|
|
||||||
.testSimpleCreate(TestSimpleCreate.create("first", "last"))
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "intVal", expected.intVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "stringVal", expected.stringVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testEnum", expected.testEnum);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testComplexCreate.[0]", expected.testComplexCreate.realName);
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, "testComplexCreate.[1].stringL1", expected.testComplexCreate.test1.stringL1);
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, "testComplexCreate.[1].longVal", expected.testComplexCreate.test1.longVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.class", TestSimpleBuilder.class.getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.stringL1", simpleBuilder.stringL1);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.longVal", simpleBuilder.longVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testSimpleCreate.[0]", expected.testSimpleCreate.firstName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testSimpleCreate.[1]", expected.testSimpleCreate.lastName);
|
|
||||||
|
|
||||||
TestRootClass actual = builderDynaBean.build(TestRootClass.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexRootNoParameters() throws Exception {
|
|
||||||
TestRootClass expected = TestRootClass.builder().build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean);
|
|
||||||
|
|
||||||
TestRootClass actual = builderDynaBean.build(TestRootClass.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexRootTopLevelOnly() throws Exception {
|
|
||||||
TestRootClass expected = TestRootClass.builder()
|
|
||||||
.intVal(10)
|
|
||||||
.stringVal("root")
|
|
||||||
.testEnum(TestEnum.Red)
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "intVal", expected.intVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "stringVal", expected.stringVal);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testEnum", expected.testEnum);
|
|
||||||
|
|
||||||
TestRootClass actual = builderDynaBean.build(TestRootClass.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSupplierNotUsed() throws Exception {
|
|
||||||
TestVariantBuilder variant = TestVariantBuilder.builder()
|
|
||||||
.testEnum(TestEnum.Green)
|
|
||||||
.intClass(10)
|
|
||||||
.variantBuilderName("variant-supplier")
|
|
||||||
.build();
|
|
||||||
TestSupplierClass expected =
|
|
||||||
TestSupplierClass.builder().variantClass(variant).build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSupplierClass.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, "variantClass.class", variant.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantClass.testEnum", variant.testEnum);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantClass.intClass", variant.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantClass.variantBuilderName", variant.variantBuilderName);
|
|
||||||
|
|
||||||
TestSupplierClass actual = builderDynaBean.build(TestSupplierClass.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testConsumerMethodsNotExposed() throws Exception {
|
|
||||||
thrown.expect(IllegalArgumentException.class);
|
|
||||||
thrown.expectMessage(containsString("Unknown property: mutator"));
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSupplierClass.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "mutator", "test-value");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testAttemptToBuildForWrongClass() throws Exception {
|
|
||||||
thrown.expect(IllegalArgumentException.class);
|
|
||||||
thrown.expectMessage(containsString("cannot be assigned to"));
|
|
||||||
thrown.expectMessage(containsString(TestVariantCreate.class.getName()));
|
|
||||||
thrown.expectMessage(containsString(TestVariantBuilder.class.getName()));
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestVariantBuilder.class, convertUtilsBean);
|
|
||||||
builderDynaBean.build(TestVariantCreate.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testVariantBuildsToSuperType() throws Exception {
|
|
||||||
TestVariantBuilder expected = TestVariantBuilder.builder()
|
|
||||||
.intClass(10)
|
|
||||||
.testEnum(TestEnum.Green)
|
|
||||||
.variantBuilderName("variant-super")
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "class", expected.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "intClass", expected.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testEnum", expected.testEnum);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantBuilderName", expected.variantBuilderName);
|
|
||||||
|
|
||||||
TestInterface actual = builderDynaBean.build(TestInterface.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testEmptyPropertyHandler() throws Exception {
|
|
||||||
String emptyPropertyValue = "test-property";
|
|
||||||
TestVariantCreate expected = TestVariantCreate.create(
|
|
||||||
emptyPropertyValue, (long) emptyPropertyValue.length(), emptyPropertyValue + "-vary");
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(
|
|
||||||
TestInterface.class,
|
|
||||||
convertUtilsBean,
|
|
||||||
s -> TestVariantCreate.create(s, (long) s.length(), s + "-vary"));
|
|
||||||
utilsBean.setProperty(builderDynaBean, "", emptyPropertyValue);
|
|
||||||
|
|
||||||
TestInterface actual = builderDynaBean.build(TestInterface.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testEmptyPropertyHandlerThrowsAfterUse() throws Exception {
|
|
||||||
thrown.expect(IllegalStateException.class);
|
|
||||||
thrown.expectMessage(containsString("When a property handler is resolved further properties may not be set."));
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(
|
|
||||||
TestInterface.class, convertUtilsBean, s -> TestVariantCreate.create("test", 10, "test"));
|
|
||||||
utilsBean.setProperty(builderDynaBean, "", "test");
|
|
||||||
utilsBean.setProperty(builderDynaBean, "[0]", "test");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testEmptyPropertyReturnsInvalidObject() throws Exception {
|
|
||||||
thrown.expect(IllegalArgumentException.class);
|
|
||||||
thrown.expectMessage(containsString(TestEnum.class.getName()));
|
|
||||||
thrown.expectMessage(containsString(TestInterface.class.getName()));
|
|
||||||
thrown.expectMessage(containsString("cannot be assigned to"));
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean =
|
|
||||||
new BuilderDynaBean(TestInterface.class, convertUtilsBean, s -> TestEnum.Green);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "", "test");
|
|
||||||
|
|
||||||
builderDynaBean.build(TestInterface.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSimpleArrayValues() throws Exception {
|
|
||||||
SimpleArrayClassVariant expected = SimpleArrayClassVariant.builder()
|
|
||||||
.ints(new Integer[] {1, 2, 3})
|
|
||||||
.variantName("simple-array")
|
|
||||||
.longs(new Long[] {1L, 2L, 3L})
|
|
||||||
.strings(new String[] {"a", "b", "c"})
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(SimpleArrayClassVariant.class, convertUtilsBean);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantName", expected.variantName);
|
|
||||||
for (int i = 0; i < expected.strings.length; ++i) {
|
|
||||||
utilsBean.setProperty(builderDynaBean, "strings[" + i + "]", expected.strings[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < expected.ints.length; ++i) {
|
|
||||||
utilsBean.setProperty(builderDynaBean, "ints[" + i + "]", expected.ints[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < expected.longs.length; ++i) {
|
|
||||||
utilsBean.setProperty(builderDynaBean, "longs[" + i + "]", expected.longs[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
SimpleArrayClassVariant actual = builderDynaBean.build(SimpleArrayClassVariant.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexArrayValuesBuilder() throws Exception {
|
|
||||||
TestVariantBuilder variant1 = TestVariantBuilder.builder()
|
|
||||||
.variantBuilderName("variant-1")
|
|
||||||
.testEnum(TestEnum.Green)
|
|
||||||
.intClass(10)
|
|
||||||
.build();
|
|
||||||
TestVariantBuilder variant2 = TestVariantBuilder.builder()
|
|
||||||
.variantBuilderName("variant-2")
|
|
||||||
.testEnum(TestEnum.Blue)
|
|
||||||
.intClass(20)
|
|
||||||
.build();
|
|
||||||
ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder()
|
|
||||||
.variantName("complex-test")
|
|
||||||
.tests(new TestInterface[] {variant1, variant2})
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantName", expected.variantName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].class", TestVariantBuilder.class.getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].variantBuilderName", variant1.variantBuilderName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].intClass", variant1.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].testEnum", variant1.testEnum);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].class", TestVariantBuilder.class.getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].variantBuilderName", variant2.variantBuilderName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].intClass", variant2.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].testEnum", variant2.testEnum);
|
|
||||||
|
|
||||||
ComplexArrayClassVariant actual = builderDynaBean.build(ComplexArrayClassVariant.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexArrayValuesCreate() throws Exception {
|
|
||||||
TestVariantCreate variant1 = TestVariantCreate.create("variant-1", 10L, "vary-1");
|
|
||||||
TestVariantCreate variant2 = TestVariantCreate.create("variant-2", 20L, "vary-2");
|
|
||||||
|
|
||||||
ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder()
|
|
||||||
.variantName("create-test")
|
|
||||||
.tests(new TestInterface[] {variant1, variant2})
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantName", expected.variantName);
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, "tests[0].class", variant1.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].[0]", variant1.variantCreateName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].[1]", variant1.longClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[0].[2]", variant1.varyString);
|
|
||||||
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, "tests[1].class", variant2.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].[0]", variant2.variantCreateName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].[1]", variant2.longClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, "tests[1].[2]", variant2.varyString);
|
|
||||||
|
|
||||||
ComplexArrayClassVariant actual = builderDynaBean.build(ComplexArrayClassVariant.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testComplexArrayValuesMixed() throws Exception {
|
|
||||||
TestInterface[] variants = new TestInterface[10];
|
|
||||||
for (int i = 0; i < variants.length; ++i) {
|
|
||||||
if (i % 2 == 0) {
|
|
||||||
variants[i] = TestVariantCreate.create("create-variant-" + i, i + 5, "vary-" + i);
|
|
||||||
} else {
|
|
||||||
variants[i] = TestVariantBuilder.builder()
|
|
||||||
.testEnum(TestEnum.values()[i % TestEnum.values().length])
|
|
||||||
.intClass(i)
|
|
||||||
.variantBuilderName("builder-variant-" + i)
|
|
||||||
.build();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder()
|
|
||||||
.variantName("large-complex")
|
|
||||||
.tests(variants)
|
|
||||||
.build();
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "variantName", expected.variantName);
|
|
||||||
for (int i = 0; i < variants.length; ++i) {
|
|
||||||
String prefix = "tests[" + i + "].";
|
|
||||||
TestInterface variant = variants[i];
|
|
||||||
if (variant instanceof TestVariantCreate) {
|
|
||||||
TestVariantCreate create = (TestVariantCreate) variant;
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, prefix + "class", create.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, prefix + "[0]", create.variantCreateName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, prefix + "[1]", create.longClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, prefix + "[2]", create.varyString);
|
|
||||||
} else if (variant instanceof TestVariantBuilder) {
|
|
||||||
TestVariantBuilder builder = (TestVariantBuilder) variant;
|
|
||||||
utilsBean.setProperty(
|
|
||||||
builderDynaBean, prefix + "class", builder.getClass().getName());
|
|
||||||
utilsBean.setProperty(builderDynaBean, prefix + "variantBuilderName", builder.variantBuilderName);
|
|
||||||
utilsBean.setProperty(builderDynaBean, prefix + "intClass", builder.intClass);
|
|
||||||
utilsBean.setProperty(builderDynaBean, prefix + "testEnum", builder.testEnum);
|
|
||||||
} else {
|
|
||||||
fail("Unknown variant " + variants[i].getClass().getName());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ComplexArrayClassVariant actual = builderDynaBean.build(ComplexArrayClassVariant.class);
|
|
||||||
|
|
||||||
assertThat(actual, equalTo(expected));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testInvalidBuilderCreateClassBuild() throws Exception {
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean);
|
|
||||||
|
|
||||||
TestInterface actual = builderDynaBean.build(TestInterface.class);
|
|
||||||
|
|
||||||
assertThat(actual, nullValue());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testInvalidBuilderCreateClassSetProperty() throws Exception {
|
|
||||||
thrown.expect(IllegalStateException.class);
|
|
||||||
thrown.expectMessage(containsString("Unable to to introspect or handle"));
|
|
||||||
thrown.expectMessage(containsString(TestInterface.class.getName()));
|
|
||||||
thrown.expectMessage(containsString("as it doesn't have a builder or create method"));
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "testProperty", "test");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSetMapAccessThrowsException() throws Exception {
|
|
||||||
thrown.expect(UnsupportedOperationException.class);
|
|
||||||
thrown.expectMessage(BuilderDynaBean.NO_MAP_ACCESS_SUPPORT);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
|
|
||||||
utilsBean.setProperty(builderDynaBean, "stringL1(value)", "test");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testGetMapAccessThrowsException() throws Exception {
|
|
||||||
thrown.expect(UnsupportedOperationException.class);
|
|
||||||
thrown.expectMessage(BuilderDynaBean.NO_MAP_ACCESS_SUPPORT);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
//
|
|
||||||
// We directly access the get method as there is no way to trigger utilsBean to access it
|
|
||||||
//
|
|
||||||
builderDynaBean.get("stringL1", "value");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testRemoveThrowsException() throws Exception {
|
|
||||||
thrown.expect(UnsupportedOperationException.class);
|
|
||||||
thrown.expectMessage(BuilderDynaBean.NO_MAP_ACCESS_SUPPORT);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
//
|
|
||||||
// We directly access the remove method as there is no way to trigger utilsBean to access it
|
|
||||||
//
|
|
||||||
builderDynaBean.remove("stringL1", "value");
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testContainsThrowsException() throws Exception {
|
|
||||||
thrown.expect(UnsupportedOperationException.class);
|
|
||||||
thrown.expectMessage(BuilderDynaBean.NO_MAP_ACCESS_SUPPORT);
|
|
||||||
|
|
||||||
BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);
|
|
||||||
//
|
|
||||||
// We directly access the remove method as there is no way to trigger utilsBean to access it
|
|
||||||
//
|
|
||||||
builderDynaBean.contains("stringL1", "value");
|
|
||||||
    }

    @Test
    public void testAdditionalMutators() throws Exception {
        TestSimpleBuilder expected =
                TestSimpleBuilder.builder().stringL1("test").longVal(10L).build();

        BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean);

        utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1);

        TestSimpleBuilder actual =
                builderDynaBean.build(TestSimpleBuilder.class, b -> ((TestSimpleBuilder.TestSimpleBuilderBuilder) b)
                        .longVal(expected.longVal));

        assertThat(actual, equalTo(expected));
    }

    public enum TestEnum {
        Red,
        Green,
        Blue
    }

    public interface TestInterface {}

    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    @RequiredArgsConstructor
    public static class TestSimpleCreate {
        private final String firstName;
        private final String lastName;

        public static TestSimpleCreate create(String firstName, String lastName) {
            return new TestSimpleCreate(firstName, lastName);
        }
    }

    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    @RequiredArgsConstructor
    public static class TestComplexCreate {
        private final String realName;
        private final TestSimpleBuilder test1;

        public static TestComplexCreate create(String realName, TestSimpleBuilder test1) {
            return new TestComplexCreate(realName, test1);
        }
    }

    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    @RequiredArgsConstructor
    public static class TestComplexCreateVariance {
        private final String varianceName;
        private final TestInterface variant;

        public static TestComplexCreateVariance create(String varianceName, TestInterface variant) {
            return new TestComplexCreateVariance(varianceName, variant);
        }
    }

    @Builder
    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    public static class TestSimpleBuilder implements TestInterface {
        private String stringL1;
        private long longVal;
    }

    @Builder
    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    public static class TestVariantBuilder implements TestInterface {
        private String variantBuilderName;
        private TestEnum testEnum;
        private Integer intClass;
    }

    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    @RequiredArgsConstructor
    public static class TestVariantCreate implements TestInterface {
        private final String variantCreateName;
        private final long longClass;
        private final String varyString;

        public static TestVariantCreate create(String variantCreateName, long longClass, String varyString) {
            return new TestVariantCreate(variantCreateName, longClass, varyString);
        }
    }

    @Builder
    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    public static class TestRootClass {
        private String stringVal;
        private int intVal;
        private TestEnum testEnum;
        TestSimpleCreate testSimpleCreate;
        TestComplexCreate testComplexCreate;
        TestSimpleBuilder testSimpleBuilder;
    }

    @ToString
    @EqualsAndHashCode
    public static class TestSupplierClass {
        private TestInterface variantClass;

        public static TestSupplierClassBuilder builder() {
            return new TestSupplierClassBuilder();
        }
    }

    @Builder
    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    public static class SimpleArrayClassVariant implements TestInterface {
        private String variantName;
        private String[] strings;
        private Integer[] ints;
        private Long[] longs;
    }

    @Builder
    @Accessors(fluent = true)
    @ToString
    @EqualsAndHashCode
    public static class ComplexArrayClassVariant implements TestInterface {
        private String variantName;
        private TestInterface[] tests;
    }

    public static class TestSupplierClassBuilder {
        private TestSupplierClass testSupplierClass = new TestSupplierClass();

        public TestSupplierClassBuilder variantClass(TestInterface testInterface) {
            testSupplierClass.variantClass = testInterface;
            return this;
        }

        public TestSupplierClassBuilder variantClass(Supplier<TestInterface> supplier) {
            throw new IllegalStateException("Supplier method should not be used.");
        }

        public TestSupplierClassBuilder mutator(Consumer<TestSupplierClassBuilder> consumer) {
            consumer.accept(this);
            return this;
        }

        public TestSupplierClass build() {
            return testSupplierClass;
        }
    }

    public static class BadClassException extends RuntimeException {
        public BadClassException(String message) {
            super(message);
        }
    }

    public static class BadClass {
        static {
            if (BuilderDynaBeanTest.isBad) {
                throw new BadClassException("This is a bad class");
            }
        }

        public String name = "default";
    }
}

@@ -1,234 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.util.Optional;

import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
import org.junit.Test;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertThat;

public class ConfigurationSettableUtilsTest {

    @Test
    public void testNoPropertiesSet() {
        ConfigResult expected = ConfigResult.builder().build();

        ConfigObject configObject = ConfigObject.builder().build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    @Test
    public void testPrimitivesSet() {
        ConfigResult expected = ConfigResult.builder().rawInt(10).rawLong(15L).build();

        ConfigObject configObject = ConfigObject.builder()
                .rawInt(expected.rawInt)
                .rawLong(expected.rawLong)
                .build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    @Test
    public void testBoolean() {
        ConfigResult expected = ConfigResult.builder().bool(false).build();

        ConfigObject configObject = ConfigObject.builder().bool(expected.bool).build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    @Test
    public void testHeapValuesSet() {
        ConfigResult expected =
                ConfigResult.builder().name("test").boxedInt(10).boxedLong(15L).build();

        ConfigObject configObject = ConfigObject.builder()
                .name(expected.name)
                .boxedInt(expected.boxedInt.intValue())
                .boxedLong(expected.boxedLong.longValue())
                .build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    @Test
    public void testComplexValuesSet() {
        ComplexValue complexValue =
                ComplexValue.builder().name("complex").value(10).build();
        ConfigResult expected =
                ConfigResult.builder().complexValue(complexValue).build();

        ConfigObject configObject = ConfigObject.builder()
                .complexValue(ComplexValue.builder()
                        .name(complexValue.name)
                        .value(complexValue.value)
                        .build())
                .build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    @Test
    public void testOptionalValuesSet() {
        ComplexValue complexValue =
                ComplexValue.builder().name("optional-complex").value(20).build();
        ConfigResult expected = ConfigResult.builder()
                .optionalString(Optional.of("test"))
                .optionalInteger(Optional.of(10))
                .optionalLong(Optional.of(15L))
                .optionalComplexValue(Optional.of(complexValue))
                .build();

        ConfigObject configObject = ConfigObject.builder()
                .optionalString(expected.optionalString.get())
                .optionalInteger(expected.optionalInteger.get())
                .optionalLong(expected.optionalLong.get())
                .optionalComplexValue(expected.optionalComplexValue.get())
                .build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    @Test
    public void testRenamedRawValues() {
        ComplexValue complexValue =
                ComplexValue.builder().name("renamed-complex").value(20).build();
        ConfigResult expected = ConfigResult.builder()
                .renamedString("renamed")
                .renamedInt(10)
                .renamedOptionalString(Optional.of("renamed-optional"))
                .renamedComplexValue(complexValue)
                .build();

        ConfigObject configObject = ConfigObject.builder()
                .toRenameString(expected.renamedString)
                .toRenameInt(expected.renamedInt)
                .toRenameComplexValue(complexValue)
                .optionalToRename(expected.renamedOptionalString.get())
                .build();
        ConfigResult actual = resolve(configObject);

        assertThat(actual, equalTo(expected));
    }

    private ConfigResult resolve(ConfigObject configObject) {
        return ConfigurationSettableUtils.resolveFields(
                configObject, ConfigResult.builder().build());
    }

    @Accessors(fluent = true)
    @Builder
    @Getter
    @Setter
    @EqualsAndHashCode
    public static class ConfigResult {
        private String name;
        private int rawInt;
        private Integer boxedInt;
        private long rawLong;
        private Long boxedLong;
        private ComplexValue complexValue;

        @Builder.Default
        private Boolean bool = true;

        private Optional<String> optionalString;
        private Optional<Integer> optionalInteger;
        private Optional<Long> optionalLong;
        private Optional<ComplexValue> optionalComplexValue;

        private String renamedString;
        private int renamedInt;
        private Optional<String> renamedOptionalString;
        private ComplexValue renamedComplexValue;
    }

    @Accessors(fluent = true)
    @Builder
    @EqualsAndHashCode
    public static class ComplexValue {
        private String name;
        private int value;
    }

    @Builder
    public static class ConfigObject {

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        private String name;

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        private int rawInt;

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        @Builder.Default
        private Boolean bool = true;

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        private Integer boxedInt;

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        private long rawLong;

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        private Long boxedLong;

        @ConfigurationSettable(configurationClass = ConfigResult.class)
        private ComplexValue complexValue;

        @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true)
        private String optionalString;

        @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true)
        private Integer optionalInteger;

        @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true)
        private Long optionalLong;

        @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true)
        private ComplexValue optionalComplexValue;

        @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedString")
        private String toRenameString;

        @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedInt")
        private int toRenameInt;

        @ConfigurationSettable(
                configurationClass = ConfigResult.class,
                methodName = "renamedOptionalString",
                convertToOptional = true)
        private String optionalToRename;

        @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedComplexValue")
        private ComplexValue toRenameComplexValue;
    }
}

@@ -1,70 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.retrieval.fanout.FanOutConfig;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertThat;

@RunWith(MockitoJUnitRunner.class)
public class FanoutConfigBeanTest {

    @Mock
    private KinesisAsyncClient kinesisAsyncClient;

    @Test
    public void testAllConfigurationTransits() {
        FanoutConfigBean fanoutConfigBean = new FanoutConfigBean();
        fanoutConfigBean.setConsumerArn("consumer-arn");
        fanoutConfigBean.setConsumerName("consumer-name");
        fanoutConfigBean.setMaxDescribeStreamConsumerRetries(10);
        fanoutConfigBean.setMaxDescribeStreamSummaryRetries(20);
        fanoutConfigBean.setRegisterStreamConsumerRetries(30);
        fanoutConfigBean.setRetryBackoffMillis(1000);

        ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
        BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean);

        MultiLangDaemonConfiguration configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean);
        configuration.setStreamName("test-stream");
        configuration.setApplicationName("test-application");
        FanOutConfig fanOutConfig = fanoutConfigBean.build(kinesisAsyncClient, configuration);

        assertThat(fanOutConfig.kinesisClient(), equalTo(kinesisAsyncClient));
        assertThat(fanOutConfig.streamName(), equalTo(configuration.getStreamName()));
        assertThat(fanOutConfig.applicationName(), equalTo(configuration.getApplicationName()));
        assertThat(fanOutConfig.consumerArn(), equalTo(fanoutConfigBean.getConsumerArn()));
        assertThat(fanOutConfig.consumerName(), equalTo(fanoutConfigBean.getConsumerName()));
        assertThat(
                fanOutConfig.maxDescribeStreamConsumerRetries(),
                equalTo(fanoutConfigBean.getMaxDescribeStreamConsumerRetries()));
        assertThat(
                fanOutConfig.maxDescribeStreamSummaryRetries(),
                equalTo(fanoutConfigBean.getMaxDescribeStreamSummaryRetries()));
        assertThat(
                fanOutConfig.registerStreamConsumerRetries(),
                equalTo(fanoutConfigBean.getRegisterStreamConsumerRetries()));
        assertThat(fanOutConfig.retryBackoffMillis(), equalTo(fanoutConfigBean.getRetryBackoffMillis()));
    }
}

@@ -1,759 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.multilang.config;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.net.URI;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.NoSuchElementException;
import java.util.Set;

import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.kinesis.common.InitialPositionInStream;
import software.amazon.kinesis.coordinator.CoordinatorConfig;
import software.amazon.kinesis.metrics.MetricsLevel;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

@RunWith(MockitoJUnitRunner.class)
public class KinesisClientLibConfiguratorTest {

    private final String credentialName1 = AlwaysSucceedCredentialsProvider.class.getName();
    private final String credentialName2 = AlwaysFailCredentialsProvider.class.getName();
    private final String credentialNameKinesis = AlwaysSucceedCredentialsProviderKinesis.class.getName();
    private final String credentialNameDynamoDB = AlwaysSucceedCredentialsProviderDynamoDB.class.getName();
    private final String credentialNameCloudWatch = AlwaysSucceedCredentialsProviderCloudWatch.class.getName();
    private final KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator();

    @Test
    public void testWithBasicSetup() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName1,
                    "workerId = 123"
                },
                '\n'));
        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "a");
        assertEquals(config.getWorkerIdentifier(), "123");
        assertThat(config.getMaxGetRecordsThreadPool(), nullValue());
        assertThat(config.getRetryGetRecordsInSeconds(), nullValue());
        assertNull(config.getGracefulLeaseHandoffTimeoutMillis());
        assertNull(config.getIsGracefulLeaseHandoffEnabled());
    }

    @Test
    public void testWithLongVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = app",
                    "streamName = 123",
                    "AwsCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "workerId = 123",
                    "failoverTimeMillis = 100",
                    "shardSyncIntervalMillis = 500"
                },
                '\n'));

        assertEquals(config.getApplicationName(), "app");
        assertEquals(config.getStreamName(), "123");
        assertEquals(config.getWorkerIdentifier(), "123");
        assertEquals(config.getFailoverTimeMillis(), 100);
        assertEquals(config.getShardSyncIntervalMillis(), 500);
    }

    @Test
    public void testWithInitialPositionInStreamExtended() {
        long epochTimeInSeconds = 1617406032;
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = app",
                    "streamName = 123",
                    "AwsCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "initialPositionInStreamExtended = " + epochTimeInSeconds
                },
                '\n'));

        assertEquals(config.getInitialPositionInStreamExtended().getTimestamp(), new Date(epochTimeInSeconds * 1000L));
        assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.AT_TIMESTAMP);
    }

    @Test
    public void testInvalidInitialPositionInStream() {
        // AT_TIMESTAMP cannot be used as initialPositionInStream. If a user wants to specify AT_TIMESTAMP,
        // they must specify the time with initialPositionInStreamExtended.
        try {
            getConfiguration(StringUtils.join(
                    new String[] {
                        "applicationName = app",
                        "streamName = 123",
                        "AwsCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                        "initialPositionInStream = AT_TIMESTAMP"
                    },
                    '\n'));
            fail("Should have thrown when initialPositionInStream is set to AT_TIMESTAMP");
        } catch (Exception e) {
            Throwable rootCause = ExceptionUtils.getRootCause(e);
            assertTrue(rootCause instanceof IllegalArgumentException);
        }
    }

    @Test
    public void testInvalidInitialPositionInStreamExtended() {
        // initialPositionInStreamExtended takes a long value indicating seconds since epoch. If a non-long
        // value is provided, the constructor should throw an IllegalArgumentException.
        try {
            getConfiguration(StringUtils.join(
                    new String[] {
                        "applicationName = app",
                        "streamName = 123",
                        "AwsCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                        "initialPositionInStreamExtended = null"
                    },
                    '\n'));
            fail("Should have thrown when initialPositionInStreamExtended is set to null");
        } catch (Exception e) {
            Throwable rootCause = ExceptionUtils.getRootCause(e);
            assertTrue(rootCause instanceof IllegalArgumentException);
        }
    }

    @Test
    public void testGracefulLeaseHandoffConfig() {
        final Long testGracefulLeaseHandoffTimeoutMillis = 12345L;
        final boolean testGracefulLeaseHandoffEnabled = true;

        final MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = dummyApplicationName",
                    "streamName = dummyStreamName",
                    "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "gracefulLeaseHandoffTimeoutMillis = " + testGracefulLeaseHandoffTimeoutMillis,
                    "isGracefulLeaseHandoffEnabled = " + testGracefulLeaseHandoffEnabled
                },
                '\n'));

        assertEquals(testGracefulLeaseHandoffTimeoutMillis, config.getGracefulLeaseHandoffTimeoutMillis());
        assertEquals(testGracefulLeaseHandoffEnabled, config.getIsGracefulLeaseHandoffEnabled());
    }

    @Test
    public void testClientVersionConfig() {
        final CoordinatorConfig.ClientVersionConfig testClientVersionConfig = Arrays.stream(
                        CoordinatorConfig.ClientVersionConfig.values())
                .findAny()
                .orElseThrow(NoSuchElementException::new);

        final MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = dummyApplicationName",
                    "streamName = dummyStreamName",
                    "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "clientVersionConfig = " + testClientVersionConfig.name()
                },
                '\n'));

        assertEquals(testClientVersionConfig, config.getClientVersionConfig());
    }

    @Test
    public void testCoordinatorStateConfig() {
        final String testCoordinatorStateTableName = "CoordState";
        final BillingMode testCoordinatorStateBillingMode = BillingMode.PAY_PER_REQUEST;
        final long testCoordinatorStateReadCapacity = 123;
        final long testCoordinatorStateWriteCapacity = 123;

        final MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = dummyApplicationName",
                    "streamName = dummyStreamName",
                    "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "coordinatorStateTableName = " + testCoordinatorStateTableName,
                    "coordinatorStateBillingMode = " + testCoordinatorStateBillingMode.name(),
                    "coordinatorStateReadCapacity = " + testCoordinatorStateReadCapacity,
                    "coordinatorStateWriteCapacity = " + testCoordinatorStateWriteCapacity
                },
                '\n'));

        assertEquals(testCoordinatorStateTableName, config.getCoordinatorStateTableName());
        assertEquals(testCoordinatorStateBillingMode, config.getCoordinatorStateBillingMode());
        assertEquals(testCoordinatorStateReadCapacity, config.getCoordinatorStateReadCapacity());
        assertEquals(testCoordinatorStateWriteCapacity, config.getCoordinatorStateWriteCapacity());
    }

    @Test
    public void testWorkerUtilizationAwareAssignmentConfig() {
        final long testInMemoryWorkerMetricsCaptureFrequencyMillis = 123;
        final long testWorkerMetricsReporterFreqInMillis = 123;
        final long testNoOfPersistedMetricsPerWorkerMetrics = 123;
        final Boolean testDisableWorkerMetrics = true;
        final double testMaxThroughputPerHostKBps = 123;
        final long testDampeningPercentage = 12;
        final long testReBalanceThresholdPercentage = 12;
        final Boolean testAllowThroughputOvershoot = false;
        final long testVarianceBalancingFrequency = 12;
        final double testWorkerMetricsEMAAlpha = .123;

        final MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = dummyApplicationName",
                    "streamName = dummyStreamName",
                    "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "inMemoryWorkerMetricsCaptureFrequencyMillis = " + testInMemoryWorkerMetricsCaptureFrequencyMillis,
                    "workerMetricsReporterFreqInMillis = " + testWorkerMetricsReporterFreqInMillis,
                    "noOfPersistedMetricsPerWorkerMetrics = " + testNoOfPersistedMetricsPerWorkerMetrics,
                    "disableWorkerMetrics = " + testDisableWorkerMetrics,
                    "maxThroughputPerHostKBps = " + testMaxThroughputPerHostKBps,
                    "dampeningPercentage = " + testDampeningPercentage,
                    "reBalanceThresholdPercentage = " + testReBalanceThresholdPercentage,
                    "allowThroughputOvershoot = " + testAllowThroughputOvershoot,
                    "varianceBalancingFrequency = " + testVarianceBalancingFrequency,
                    "workerMetricsEMAAlpha = " + testWorkerMetricsEMAAlpha
                },
                '\n'));

        assertEquals(
                testInMemoryWorkerMetricsCaptureFrequencyMillis,
                config.getInMemoryWorkerMetricsCaptureFrequencyMillis());
        assertEquals(testWorkerMetricsReporterFreqInMillis, config.getWorkerMetricsReporterFreqInMillis());
        assertEquals(testNoOfPersistedMetricsPerWorkerMetrics, config.getNoOfPersistedMetricsPerWorkerMetrics());
        assertEquals(testDisableWorkerMetrics, config.getDisableWorkerMetrics());
        assertEquals(testMaxThroughputPerHostKBps, config.getMaxThroughputPerHostKBps(), 0.0001);
        assertEquals(testDampeningPercentage, config.getDampeningPercentage());
        assertEquals(testReBalanceThresholdPercentage, config.getReBalanceThresholdPercentage());
        assertEquals(testAllowThroughputOvershoot, config.getAllowThroughputOvershoot());
        assertEquals(testVarianceBalancingFrequency, config.getVarianceBalancingFrequency());
        assertEquals(testWorkerMetricsEMAAlpha, config.getWorkerMetricsEMAAlpha(), 0.0001);
    }

    @Test
    public void testWorkerMetricsConfig() {
        final String testWorkerMetricsTableName = "CoordState";
        final BillingMode testWorkerMetricsBillingMode = BillingMode.PROVISIONED;
        final long testWorkerMetricsReadCapacity = 123;
        final long testWorkerMetricsWriteCapacity = 123;

        final MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = dummyApplicationName",
                    "streamName = dummyStreamName",
                    "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "workerMetricsTableName = " + testWorkerMetricsTableName,
                    "workerMetricsBillingMode = " + testWorkerMetricsBillingMode.name(),
                    "workerMetricsReadCapacity = " + testWorkerMetricsReadCapacity,
                    "workerMetricsWriteCapacity = " + testWorkerMetricsWriteCapacity
                },
                '\n'));

        assertEquals(testWorkerMetricsTableName, config.getWorkerMetricsTableName());
        assertEquals(testWorkerMetricsBillingMode, config.getWorkerMetricsBillingMode());
        assertEquals(testWorkerMetricsReadCapacity, config.getWorkerMetricsReadCapacity());
        assertEquals(testWorkerMetricsWriteCapacity, config.getWorkerMetricsWriteCapacity());
    }

    @Test(expected = IllegalArgumentException.class)
    public void testInvalidClientVersionConfig() {
        getConfiguration(StringUtils.join(
                new String[] {
                    "applicationName = dummyApplicationName",
                    "streamName = dummyStreamName",
                    "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "clientVersionConfig = " + "invalid_client_version_config"
                },
                '\n'));
    }

    @Test
    public void testWithUnsupportedClientConfigurationVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "AwsCredentialsProvider = " + credentialName1 + ", " + credentialName2,
                    "workerId = id",
                    "kinesisClientConfig = {}",
                    "streamName = stream",
                    "applicationName = b"
                },
                '\n'));

        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "stream");
        assertEquals(config.getWorkerIdentifier(), "id");
        // setting kinesisClientConfig has no effect on the kinesisClientConfiguration variable.
    }

    @Test
    public void testWithIntVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = kinesis",
                    "AwsCredentialsProvider = " + credentialName2 + ", " + credentialName1,
                    "workerId = w123",
                    "maxRecords = 10",
                    "metricsMaxQueueSize = 20",
                    "applicationName = kinesis",
                    "retryGetRecordsInSeconds = 2",
                    "maxGetRecordsThreadPool = 1"
                },
                '\n'));

        assertEquals(config.getApplicationName(), "kinesis");
        assertEquals(config.getStreamName(), "kinesis");
        assertEquals(config.getWorkerIdentifier(), "w123");
        assertEquals(config.getMaxRecords(), 10);
        assertEquals(config.getMetricsMaxQueueSize(), 20);
        assertThat(config.getRetryGetRecordsInSeconds(), equalTo(2));
        assertThat(config.getMaxGetRecordsThreadPool(), equalTo(1));
    }

    @Test
    public void testWithBooleanVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD, " + credentialName1,
                    "workerId = 0",
                    "cleanupLeasesUponShardCompletion = false",
                    "validateSequenceNumberBeforeCheckpointing = true"
                },
                '\n'));

        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "a");
        assertEquals(config.getWorkerIdentifier(), "0");
        assertFalse(config.isCleanupLeasesUponShardCompletion());
        assertTrue(config.isValidateSequenceNumberBeforeCheckpointing());
    }

    @Test
    public void testWithStringVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 1",
                    "kinesisEndpoint = https://kinesis",
                    "metricsLevel = SUMMARY"
                },
                '\n'));

        assertEquals(config.getWorkerIdentifier(), "1");
        assertEquals(config.getKinesisClient().get("endpointOverride"), URI.create("https://kinesis"));
        assertEquals(config.getMetricsLevel(), MetricsLevel.SUMMARY);
    }

    @Test
    public void testWithSetVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 1",
                    "metricsEnabledDimensions = ShardId, WorkerIdentifier"
                },
                '\n'));

        Set<String> expectedMetricsEnabledDimensions = ImmutableSet.<String>builder()
                .add("ShardId", "WorkerIdentifier")
                .build();
        assertThat(
                new HashSet<>(Arrays.asList(config.getMetricsEnabledDimensions())),
                equalTo(expectedMetricsEnabledDimensions));
    }

    @Test
    public void testWithInitialPositionInStreamTrimHorizon() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 123",
                    "initialPositionInStream = TriM_Horizon"
                },
                '\n'));

        assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON);
    }

    @Test
    public void testWithInitialPositionInStreamLatest() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 123",
                    "initialPositionInStream = LateSt"
                },
                '\n'));

        assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.LATEST);
    }

    @Test
    public void testSkippingNonKCLVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 123",
                    "initialPositionInStream = TriM_Horizon",
                    "abc = 1"
                },
                '\n'));

        assertEquals(config.getApplicationName(), "b");
        assertEquals(config.getStreamName(), "a");
        assertEquals(config.getWorkerIdentifier(), "123");
        assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON);
    }

    @Test
    public void testEmptyOptionalVariables() {
        MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 123",
                    "initialPositionInStream = TriM_Horizon",
                    "maxGetRecordsThreadPool = 1"
                },
                '\n'));
        assertThat(config.getMaxGetRecordsThreadPool(), equalTo(1));
        assertThat(config.getRetryGetRecordsInSeconds(), nullValue());
    }

    @Test
    public void testWithZeroValue() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = ABCD," + credentialName1,
                    "workerId = 123",
                    "initialPositionInStream = TriM_Horizon",
                    "maxGetRecordsThreadPool = 0",
                    "retryGetRecordsInSeconds = 0"
                },
                '\n');
        getConfiguration(test);
    }

    @Test
    public void testWithInvalidIntValue() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName1,
                    "workerId = 123",
                    "failoverTimeMillis = 100nf"
                },
                '\n');
        getConfiguration(test);
    }

    @Test
    public void testWithNegativeIntValue() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName1,
                    "workerId = 123",
                    "failoverTimeMillis = -12"
                },
                '\n');

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        getConfiguration(test);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testWithMissingCredentialsProvider() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "workerId = 123",
                    "failoverTimeMillis = 100",
                    "shardSyncIntervalMillis = 500"
                },
                '\n');

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        getConfiguration(test);
    }

    @Test
    public void testWithMissingWorkerId() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName1,
                    "failoverTimeMillis = 100",
                    "shardSyncIntervalMillis = 500"
                },
                '\n');
        MultiLangDaemonConfiguration config = getConfiguration(test);

        // if workerId is not provided, configurator should assign one for it automatically
        assertNotNull(config.getWorkerIdentifier());
        assertFalse(config.getWorkerIdentifier().isEmpty());
    }

    @Test(expected = NullPointerException.class)
    public void testWithMissingStreamNameAndMissingStreamArn() {
        String test = StringUtils.join(
                new String[] {
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName1,
                    "workerId = 123",
                    "failoverTimeMillis = 100"
                },
                '\n');
        getConfiguration(test);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testWithEmptyStreamNameAndMissingStreamArn() {
        String test = StringUtils.join(
                new String[] {
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName1,
                    "workerId = 123",
                    "failoverTimeMillis = 100",
                    "streamName = ",
                    "streamArn = "
                },
                '\n');
        getConfiguration(test);
    }

    @Test(expected = NullPointerException.class)
    public void testWithMissingApplicationName() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "AwsCredentialsProvider = " + credentialName1,
                    "workerId = 123",
                    "failoverTimeMillis = 100"
                },
                '\n');
        getConfiguration(test);
    }

    @Test
    public void testWithAwsCredentialsFailed() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialName2,
                    "failoverTimeMillis = 100",
                    "shardSyncIntervalMillis = 500"
                },
                '\n');
        MultiLangDaemonConfiguration config = getConfiguration(test);

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        try {
            config.getKinesisCredentialsProvider()
                    .build(AwsCredentialsProvider.class)
                    .resolveCredentials();
            fail("expect failure with wrong credentials provider");
        } catch (Exception e) {
            // succeed
        }
    }

    @Test
    public void testProcessKeyWithExpectedCasing() {
        String key = "AwsCredentialsProvider";
        String result = configurator.processKey(key);
        assertEquals("awsCredentialsProvider", result);
    }

    @Test
    public void testProcessKeyWithOldCasing() {
        String key = "AWSCredentialsProvider";
        String result = configurator.processKey(key);
        assertEquals("awsCredentialsProvider", result);
    }

    @Test
    public void testProcessKeyWithMixedCasing() {
        String key = "AwScReDeNtIaLsPrOvIdEr";
        String result = configurator.processKey(key);
        assertEquals("awsCredentialsProvider", result);
    }

    @Test
    public void testProcessKeyWithSuffix() {
        String key = "awscredentialsproviderDynamoDB";
        String result = configurator.processKey(key);
        assertEquals("awsCredentialsProviderDynamoDB", result);
    }

    // TODO: fix this test
    @Test
    public void testWithDifferentAwsCredentialsForDynamoDBAndCloudWatch() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialNameKinesis,
                    "AwsCredentialsProviderDynamoDB = " + credentialNameDynamoDB,
                    "AwsCredentialsProviderCloudWatch = " + credentialNameCloudWatch,
                    "failoverTimeMillis = 100",
                    "shardSyncIntervalMillis = 500"
                },
                '\n');

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        final MultiLangDaemonConfiguration config = getConfiguration(test);
        config.getKinesisCredentialsProvider()
                .build(AwsCredentialsProvider.class)
                .resolveCredentials();
        config.getDynamoDBCredentialsProvider()
                .build(AwsCredentialsProvider.class)
                .resolveCredentials();
        config.getCloudWatchCredentialsProvider()
                .build(AwsCredentialsProvider.class)
                .resolveCredentials();
    }

    // TODO: fix this test
    @Test
    public void testWithDifferentAwsCredentialsForDynamoDBAndCloudWatchFailed() {
        String test = StringUtils.join(
                new String[] {
                    "streamName = a",
                    "applicationName = b",
                    "AwsCredentialsProvider = " + credentialNameKinesis,
                    "AwsCredentialsProviderDynamoDB = " + credentialName2,
                    "AwsCredentialsProviderCloudWatch = " + credentialName2,
                    "failoverTimeMillis = 100",
                    "shardSyncIntervalMillis = 500"
                },
                '\n');

        // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement
        final MultiLangDaemonConfiguration config = getConfiguration(test);
        config.getKinesisCredentialsProvider()
                .build(AwsCredentialsProvider.class)
                .resolveCredentials();
        try {
            config.getDynamoDBCredentialsProvider()
                    .build(AwsCredentialsProvider.class)
                    .resolveCredentials();
            fail("DynamoDB credential providers should fail.");
        } catch (Exception e) {
            // succeed
        }
        try {
            config.getCloudWatchCredentialsProvider()
                    .build(AwsCredentialsProvider.class)
                    .resolveCredentials();
            fail("CloudWatch credential providers should fail.");
        } catch (Exception e) {
            // succeed
        }
    }

    /**
     * This credentials provider will always succeed
     */
    public static class AlwaysSucceedCredentialsProvider implements AwsCredentialsProvider {
        @Override
        public AwsCredentials resolveCredentials() {
            return AwsBasicCredentials.create("a", "b");
        }
    }

    /**
     * This credentials provider will always succeed
     */
    public static class AlwaysSucceedCredentialsProviderKinesis implements AwsCredentialsProvider {
        @Override
        public AwsCredentials resolveCredentials() {
            return AwsBasicCredentials.create("DUMMY_ACCESS_KEY_ID", "DUMMY_SECRET_ACCESS_KEY");
        }
    }

    /**
     * This credentials provider will always succeed
     */
    public static class AlwaysSucceedCredentialsProviderDynamoDB implements AwsCredentialsProvider {
        @Override
        public AwsCredentials resolveCredentials() {
            return AwsBasicCredentials.create("DUMMY_ACCESS_KEY_ID", "DUMMY_SECRET_ACCESS_KEY");
        }
    }

    /**
     * This credentials provider will always succeed
     */
    public static class AlwaysSucceedCredentialsProviderCloudWatch implements AwsCredentialsProvider {
        @Override
        public AwsCredentials resolveCredentials() {
            return AwsBasicCredentials.create("DUMMY_ACCESS_KEY_ID", "DUMMY_SECRET_ACCESS_KEY");
        }
    }

    /**
     * This credentials provider will always fail
     */
    public static class AlwaysFailCredentialsProvider implements AwsCredentialsProvider {

        @Override
        public AwsCredentials resolveCredentials() {
            throw new IllegalArgumentException();
        }
    }

    private MultiLangDaemonConfiguration getConfiguration(String configString) {
        InputStream input = new ByteArrayInputStream(configString.getBytes());
        return configurator.getConfiguration(input);
    }
}

@@ -1,513 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.multilang.config;

import java.util.Arrays;
import java.util.NoSuchElementException;

import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.beanutils.ConvertUtilsBean;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.common.ConfigsBuilder;
import software.amazon.kinesis.coordinator.CoordinatorConfig;
import software.amazon.kinesis.leases.LeaseManagementConfig;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.retrieval.fanout.FanOutConfig;
import software.amazon.kinesis.retrieval.polling.PollingConfig;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;

@RunWith(MockitoJUnitRunner.class)
public class MultiLangDaemonConfigurationTest {

    private static final String AWS_REGION_PROPERTY_NAME = "aws.region";
    private static final String DUMMY_APPLICATION_NAME = "dummyApplicationName";
    private static final String DUMMY_STREAM_NAME = "dummyStreamName";

    private BeanUtilsBean utilsBean;
    private ConvertUtilsBean convertUtilsBean;
    private String originalRegionValue;

    @Mock
    private ShardRecordProcessorFactory shardRecordProcessorFactory;

    @Rule
    public final ExpectedException thrown = ExpectedException.none();

    @Before
    public void setup() {
        originalRegionValue = System.getProperty(AWS_REGION_PROPERTY_NAME);
        System.setProperty(AWS_REGION_PROPERTY_NAME, "us-east-1");
        convertUtilsBean = new ConvertUtilsBean();
        utilsBean = new BeanUtilsBean(convertUtilsBean);
    }

    @After
    public void after() {
        if (originalRegionValue != null) {
            System.setProperty(AWS_REGION_PROPERTY_NAME, originalRegionValue);
        } else {
            System.clearProperty(AWS_REGION_PROPERTY_NAME);
        }
    }

    public MultiLangDaemonConfiguration baseConfiguration() {
        MultiLangDaemonConfiguration configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean);
        configuration.setApplicationName(DUMMY_APPLICATION_NAME);
        configuration.setStreamName(DUMMY_STREAM_NAME);
        configuration.getKinesisCredentialsProvider().set("class", DefaultCredentialsProvider.class.getName());

        return configuration;
    }

    @Test
    public void testSetPrimitiveValue() {
        MultiLangDaemonConfiguration configuration = baseConfiguration();
        configuration.setMaxLeasesForWorker(10);

        MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                configuration.resolvedConfiguration(shardRecordProcessorFactory);

        assertThat(resolvedConfiguration.leaseManagementConfig.maxLeasesForWorker(), equalTo(10));
    }

    @Test
    public void testSetEnablePriorityLeaseAssignment() {
        MultiLangDaemonConfiguration configuration = baseConfiguration();
        configuration.setEnablePriorityLeaseAssignment(false);

        MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                configuration.resolvedConfiguration(shardRecordProcessorFactory);

        assertThat(resolvedConfiguration.leaseManagementConfig.enablePriorityLeaseAssignment(), equalTo(false));
    }

    @Test
    public void testSetLeaseTableDeletionProtectionEnabledToTrue() {
        MultiLangDaemonConfiguration configuration = baseConfiguration();
        configuration.setLeaseTableDeletionProtectionEnabled(true);

        MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                configuration.resolvedConfiguration(shardRecordProcessorFactory);

        assertTrue(resolvedConfiguration.leaseManagementConfig.leaseTableDeletionProtectionEnabled());
    }

    @Test
    public void testGracefulLeaseHandoffConfig() {
        final LeaseManagementConfig.GracefulLeaseHandoffConfig defaultGracefulLeaseHandoffConfig =
                getTestConfigsBuilder().leaseManagementConfig().gracefulLeaseHandoffConfig();

        final long testGracefulLeaseHandoffTimeoutMillis =
                defaultGracefulLeaseHandoffConfig.gracefulLeaseHandoffTimeoutMillis() + 12345;
        final boolean testGracefulLeaseHandoffEnabled =
                !defaultGracefulLeaseHandoffConfig.isGracefulLeaseHandoffEnabled();

        final MultiLangDaemonConfiguration configuration = baseConfiguration();
        configuration.setGracefulLeaseHandoffTimeoutMillis(testGracefulLeaseHandoffTimeoutMillis);
        configuration.setIsGracefulLeaseHandoffEnabled(testGracefulLeaseHandoffEnabled);

        final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                configuration.resolvedConfiguration(shardRecordProcessorFactory);

        final LeaseManagementConfig.GracefulLeaseHandoffConfig gracefulLeaseHandoffConfig =
                resolvedConfiguration.leaseManagementConfig.gracefulLeaseHandoffConfig();

        assertEquals(
                testGracefulLeaseHandoffTimeoutMillis, gracefulLeaseHandoffConfig.gracefulLeaseHandoffTimeoutMillis());
        assertEquals(testGracefulLeaseHandoffEnabled, gracefulLeaseHandoffConfig.isGracefulLeaseHandoffEnabled());
    }

    @Test
    public void testGracefulLeaseHandoffUsesDefaults() {
        final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                baseConfiguration().resolvedConfiguration(shardRecordProcessorFactory);

        final LeaseManagementConfig.GracefulLeaseHandoffConfig gracefulLeaseHandoffConfig =
                resolvedConfiguration.leaseManagementConfig.gracefulLeaseHandoffConfig();

        final LeaseManagementConfig.GracefulLeaseHandoffConfig defaultGracefulLeaseHandoffConfig =
                getTestConfigsBuilder().leaseManagementConfig().gracefulLeaseHandoffConfig();

        assertEquals(defaultGracefulLeaseHandoffConfig, gracefulLeaseHandoffConfig);
    }

    @Test
    public void testWorkerUtilizationAwareAssignmentConfig() {
        MultiLangDaemonConfiguration configuration = baseConfiguration();

        configuration.setInMemoryWorkerMetricsCaptureFrequencyMillis(123);
        configuration.setWorkerMetricsReporterFreqInMillis(123);
        configuration.setNoOfPersistedMetricsPerWorkerMetrics(123);
        configuration.setDisableWorkerMetrics(true);
        configuration.setMaxThroughputPerHostKBps(.123);
        configuration.setDampeningPercentage(12);
        configuration.setReBalanceThresholdPercentage(12);
        configuration.setAllowThroughputOvershoot(false);
        configuration.setVarianceBalancingFrequency(12);
        configuration.setWorkerMetricsEMAAlpha(.123);

        MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                configuration.resolvedConfiguration(shardRecordProcessorFactory);
        LeaseManagementConfig leaseManagementConfig = resolvedConfiguration.leaseManagementConfig;
        LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig config =
                leaseManagementConfig.workerUtilizationAwareAssignmentConfig();

        assertEquals(config.inMemoryWorkerMetricsCaptureFrequencyMillis(), 123);
        assertEquals(config.workerMetricsReporterFreqInMillis(), 123);
        assertEquals(config.noOfPersistedMetricsPerWorkerMetrics(), 123);
        assertTrue(config.disableWorkerMetrics());
        assertEquals(config.maxThroughputPerHostKBps(), .123, .25);
        assertEquals(config.dampeningPercentage(), 12);
        assertEquals(config.reBalanceThresholdPercentage(), 12);
        assertFalse(config.allowThroughputOvershoot());
        assertEquals(config.varianceBalancingFrequency(), 12);
        assertEquals(config.workerMetricsEMAAlpha(), .123, .25);
    }

    @Test
    public void testWorkerUtilizationAwareAssignmentConfigUsesDefaults() {
        final LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig defaultWorkerUtilAwareAssignmentConfig =
                getTestConfigsBuilder().leaseManagementConfig().workerUtilizationAwareAssignmentConfig();

        final MultiLangDaemonConfiguration configuration = baseConfiguration();
        configuration.setVarianceBalancingFrequency(
                defaultWorkerUtilAwareAssignmentConfig.varianceBalancingFrequency() + 12345);

        final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
                configuration.resolvedConfiguration(shardRecordProcessorFactory);

        final LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig resolvedWorkerUtilAwareAssignmentConfig =
|
|
||||||
resolvedConfiguration.leaseManagementConfig.workerUtilizationAwareAssignmentConfig();
|
|
||||||
|
|
||||||
assertNotEquals(defaultWorkerUtilAwareAssignmentConfig, resolvedWorkerUtilAwareAssignmentConfig);
|
|
||||||
|
|
||||||
// apart from the single updated configuration, all other config values should be equal to the default
|
|
||||||
resolvedWorkerUtilAwareAssignmentConfig.varianceBalancingFrequency(
|
|
||||||
defaultWorkerUtilAwareAssignmentConfig.varianceBalancingFrequency());
|
|
||||||
assertEquals(defaultWorkerUtilAwareAssignmentConfig, resolvedWorkerUtilAwareAssignmentConfig);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testWorkerMetricsTableConfigBean() {
|
|
||||||
final BillingMode testWorkerMetricsTableBillingMode = BillingMode.PROVISIONED;
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
|
|
||||||
configuration.setWorkerMetricsTableName("testTable");
|
|
||||||
configuration.setWorkerMetricsBillingMode(testWorkerMetricsTableBillingMode);
|
|
||||||
configuration.setWorkerMetricsReadCapacity(123);
|
|
||||||
configuration.setWorkerMetricsWriteCapacity(123);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
LeaseManagementConfig leaseManagementConfig = resolvedConfiguration.leaseManagementConfig;
|
|
||||||
LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig workerUtilizationConfig =
|
|
||||||
leaseManagementConfig.workerUtilizationAwareAssignmentConfig();
|
|
||||||
LeaseManagementConfig.WorkerMetricsTableConfig workerMetricsConfig =
|
|
||||||
workerUtilizationConfig.workerMetricsTableConfig();
|
|
||||||
|
|
||||||
assertEquals(workerMetricsConfig.tableName(), "testTable");
|
|
||||||
assertEquals(workerMetricsConfig.billingMode(), testWorkerMetricsTableBillingMode);
|
|
||||||
assertEquals(workerMetricsConfig.readCapacity(), 123);
|
|
||||||
assertEquals(workerMetricsConfig.writeCapacity(), 123);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testWorkerMetricsTableConfigUsesDefaults() {
|
|
||||||
final LeaseManagementConfig.WorkerMetricsTableConfig defaultWorkerMetricsTableConfig = getTestConfigsBuilder()
|
|
||||||
.leaseManagementConfig()
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig();
|
|
||||||
|
|
||||||
final MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setWorkerMetricsBillingMode(Arrays.stream(BillingMode.values())
|
|
||||||
.filter(billingMode -> billingMode != defaultWorkerMetricsTableConfig.billingMode())
|
|
||||||
.findFirst()
|
|
||||||
.orElseThrow(NoSuchElementException::new));
|
|
||||||
|
|
||||||
final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
final LeaseManagementConfig.WorkerMetricsTableConfig resolvedWorkerMetricsTableConfig = resolvedConfiguration
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig();
|
|
||||||
|
|
||||||
assertNotEquals(defaultWorkerMetricsTableConfig, resolvedWorkerMetricsTableConfig);
|
|
||||||
|
|
||||||
// apart from the single updated configuration, all other config values should be equal to the default
|
|
||||||
resolvedWorkerMetricsTableConfig.billingMode(defaultWorkerMetricsTableConfig.billingMode());
|
|
||||||
assertEquals(defaultWorkerMetricsTableConfig, resolvedWorkerMetricsTableConfig);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCoordinatorStateTableConfigBean() {
|
|
||||||
final BillingMode testWorkerMetricsTableBillingMode = BillingMode.PAY_PER_REQUEST;
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
|
|
||||||
configuration.setCoordinatorStateTableName("testTable");
|
|
||||||
configuration.setCoordinatorStateBillingMode(testWorkerMetricsTableBillingMode);
|
|
||||||
configuration.setCoordinatorStateReadCapacity(123);
|
|
||||||
configuration.setCoordinatorStateWriteCapacity(123);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
CoordinatorConfig coordinatorConfig = resolvedConfiguration.getCoordinatorConfig();
|
|
||||||
CoordinatorConfig.CoordinatorStateTableConfig coordinatorStateConfig =
|
|
||||||
coordinatorConfig.coordinatorStateTableConfig();
|
|
||||||
assertEquals(coordinatorStateConfig.tableName(), "testTable");
|
|
||||||
assertEquals(coordinatorStateConfig.billingMode(), testWorkerMetricsTableBillingMode);
|
|
||||||
assertEquals(coordinatorStateConfig.readCapacity(), 123);
|
|
||||||
assertEquals(coordinatorStateConfig.writeCapacity(), 123);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testCoordinatorStateTableConfigUsesDefaults() {
|
|
||||||
final CoordinatorConfig.CoordinatorStateTableConfig defaultCoordinatorStateTableConfig =
|
|
||||||
getTestConfigsBuilder().coordinatorConfig().coordinatorStateTableConfig();
|
|
||||||
|
|
||||||
final MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setCoordinatorStateWriteCapacity(defaultCoordinatorStateTableConfig.writeCapacity() + 12345);
|
|
||||||
|
|
||||||
final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
final CoordinatorConfig.CoordinatorStateTableConfig resolvedCoordinatorStateTableConfig =
|
|
||||||
resolvedConfiguration.coordinatorConfig.coordinatorStateTableConfig();
|
|
||||||
|
|
||||||
assertNotEquals(defaultCoordinatorStateTableConfig, resolvedCoordinatorStateTableConfig);
|
|
||||||
|
|
||||||
// apart from the single updated configuration, all other config values should be equal to the default
|
|
||||||
resolvedCoordinatorStateTableConfig.writeCapacity(defaultCoordinatorStateTableConfig.writeCapacity());
|
|
||||||
assertEquals(defaultCoordinatorStateTableConfig, resolvedCoordinatorStateTableConfig);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSetLeaseTablePitrEnabledToTrue() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setLeaseTablePitrEnabled(true);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertTrue(resolvedConfiguration.leaseManagementConfig.leaseTablePitrEnabled());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSetLeaseTableDeletionProtectionEnabledToFalse() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setLeaseTableDeletionProtectionEnabled(false);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertFalse(resolvedConfiguration.leaseManagementConfig.leaseTableDeletionProtectionEnabled());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testSetLeaseTablePitrEnabledToFalse() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setLeaseTablePitrEnabled(false);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertFalse(resolvedConfiguration.leaseManagementConfig.leaseTablePitrEnabled());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testDefaultRetrievalConfig() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testDefaultRetrievalConfigWithPollingConfigSet() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setMaxRecords(10);
|
|
||||||
configuration.setIdleTimeBetweenReadsInMillis(60000);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class));
|
|
||||||
assertEquals(
|
|
||||||
10,
|
|
||||||
((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()).maxRecords());
|
|
||||||
assertEquals(
|
|
||||||
60000,
|
|
||||||
((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig())
|
|
||||||
.idleTimeBetweenReadsInMillis());
|
|
||||||
assertTrue(((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig())
|
|
||||||
.usePollingConfigIdleTimeValue());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testFanoutRetrievalMode() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setRetrievalMode(RetrievalMode.FANOUT);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testPollingRetrievalMode() {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setRetrievalMode(RetrievalMode.POLLING);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testRetrievalModeSetForPollingString() throws Exception {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
|
|
||||||
utilsBean.setProperty(
|
|
||||||
configuration, "retrievalMode", RetrievalMode.POLLING.name().toLowerCase());
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testRetrievalModeSetForFanoutString() throws Exception {
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
|
|
||||||
utilsBean.setProperty(
|
|
||||||
configuration, "retrievalMode", RetrievalMode.FANOUT.name().toLowerCase());
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class));
|
|
||||||
}
|
|
||||||
|
|
||||||
    @Test
    public void testInvalidRetrievalMode() throws Exception {
        thrown.expect(IllegalArgumentException.class);
        thrown.expectMessage("Unknown retrieval type");

        MultiLangDaemonConfiguration configuration = baseConfiguration();

        utilsBean.setProperty(configuration, "retrievalMode", "invalid");
    }

    // @Test
    // TODO : Enable this test once https://github.com/awslabs/amazon-kinesis-client/issues/692 is resolved
    public void testmetricsEnabledDimensions() {
        MultiLangDaemonConfiguration configuration = baseConfiguration();
        configuration.setMetricsEnabledDimensions(new String[] {"Operation"});
        configuration.resolvedConfiguration(shardRecordProcessorFactory);
    }
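The string-based tests above feed lowercase values such as "polling" and "fanout" through BeanUtils and expect them to resolve to the matching RetrievalMode constant, while testInvalidRetrievalMode expects an IllegalArgumentException carrying "Unknown retrieval type". The sketch below only illustrates that kind of case-insensitive lookup; it is not the converter MultiLangDaemonConfiguration actually registers.

    // Hypothetical helper, shown for illustration only.
    static RetrievalMode parseRetrievalMode(String value) {
        for (RetrievalMode mode : RetrievalMode.values()) {
            if (mode.name().equalsIgnoreCase(value)) {
                return mode;
            }
        }
        throw new IllegalArgumentException("Unknown retrieval type: " + value);
    }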
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testFanoutConfigSetConsumerName() {
|
|
||||||
String consumerArn = "test-consumer";
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
|
|
||||||
configuration.setRetrievalMode(RetrievalMode.FANOUT);
|
|
||||||
configuration.getFanoutConfig().setConsumerArn(consumerArn);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
assertThat(
|
|
||||||
resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class));
|
|
||||||
FanOutConfig fanOutConfig =
|
|
||||||
(FanOutConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig();
|
|
||||||
|
|
||||||
assertThat(fanOutConfig.consumerArn(), equalTo(consumerArn));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testClientVersionConfig() {
|
|
||||||
final CoordinatorConfig.ClientVersionConfig testClientVersionConfig =
|
|
||||||
CoordinatorConfig.ClientVersionConfig.CLIENT_VERSION_CONFIG_COMPATIBLE_WITH_2X;
|
|
||||||
|
|
||||||
final MultiLangDaemonConfiguration configuration = baseConfiguration();
|
|
||||||
configuration.setClientVersionConfig(testClientVersionConfig);
|
|
||||||
|
|
||||||
final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
configuration.resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
final CoordinatorConfig coordinatorConfig = resolvedConfiguration.coordinatorConfig;
|
|
||||||
|
|
||||||
assertEquals(testClientVersionConfig, coordinatorConfig.clientVersionConfig());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testClientVersionConfigUsesDefault() {
|
|
||||||
final MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration =
|
|
||||||
baseConfiguration().resolvedConfiguration(shardRecordProcessorFactory);
|
|
||||||
|
|
||||||
final CoordinatorConfig coordinatorConfig = resolvedConfiguration.coordinatorConfig;
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
getTestConfigsBuilder().coordinatorConfig().clientVersionConfig(),
|
|
||||||
coordinatorConfig.clientVersionConfig());
|
|
||||||
}
|
|
||||||
|
|
||||||
    private ConfigsBuilder getTestConfigsBuilder() {
        return new ConfigsBuilder(
                DUMMY_STREAM_NAME,
                DUMMY_APPLICATION_NAME,
                Mockito.mock(KinesisAsyncClient.class),
                Mockito.mock(DynamoDbAsyncClient.class),
                Mockito.mock(CloudWatchAsyncClient.class),
                "dummyWorkerIdentifier",
                shardRecordProcessorFactory);
    }
}
|
|
||||||
|
|
@@ -1,68 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package software.amazon.kinesis.multilang.config;
|
|
||||||
|
|
||||||
import java.util.Optional;
|
|
||||||
|
|
||||||
import org.apache.commons.beanutils.BeanUtilsBean;
|
|
||||||
import org.apache.commons.beanutils.ConvertUtilsBean;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.junit.runner.RunWith;
|
|
||||||
import org.mockito.Mock;
|
|
||||||
import org.mockito.runners.MockitoJUnitRunner;
|
|
||||||
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
|
|
||||||
import software.amazon.kinesis.retrieval.polling.PollingConfig;
|
|
||||||
|
|
||||||
import static org.hamcrest.CoreMatchers.equalTo;
|
|
||||||
import static org.junit.Assert.assertThat;
|
|
||||||
|
|
||||||
@RunWith(MockitoJUnitRunner.class)
|
|
||||||
public class PollingConfigBeanTest {
|
|
||||||
|
|
||||||
@Mock
|
|
||||||
private KinesisAsyncClient kinesisAsyncClient;
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testAllPropertiesTransit() {
|
|
||||||
PollingConfigBean pollingConfigBean = new PollingConfigBean();
|
|
||||||
pollingConfigBean.setIdleTimeBetweenReadsInMillis(1000);
|
|
||||||
pollingConfigBean.setMaxGetRecordsThreadPool(20);
|
|
||||||
pollingConfigBean.setMaxRecords(5000);
|
|
||||||
pollingConfigBean.setRetryGetRecordsInSeconds(30);
|
|
||||||
|
|
||||||
ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
|
|
||||||
BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration multiLangDaemonConfiguration =
|
|
||||||
new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean);
|
|
||||||
multiLangDaemonConfiguration.setStreamName("test-stream");
|
|
||||||
|
|
||||||
PollingConfig pollingConfig = pollingConfigBean.build(kinesisAsyncClient, multiLangDaemonConfiguration);
|
|
||||||
|
|
||||||
assertThat(pollingConfig.kinesisClient(), equalTo(kinesisAsyncClient));
|
|
||||||
assertThat(pollingConfig.streamName(), equalTo(multiLangDaemonConfiguration.getStreamName()));
|
|
||||||
assertThat(
|
|
||||||
pollingConfig.idleTimeBetweenReadsInMillis(),
|
|
||||||
equalTo(pollingConfigBean.getIdleTimeBetweenReadsInMillis()));
|
|
||||||
assertThat(
|
|
||||||
pollingConfig.maxGetRecordsThreadPool(),
|
|
||||||
equalTo(Optional.of(pollingConfigBean.getMaxGetRecordsThreadPool())));
|
|
||||||
assertThat(pollingConfig.maxRecords(), equalTo(pollingConfigBean.getMaxRecords()));
|
|
||||||
assertThat(
|
|
||||||
pollingConfig.retryGetRecordsInSeconds(),
|
|
||||||
equalTo(Optional.of(pollingConfigBean.getRetryGetRecordsInSeconds())));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@@ -1,303 +0,0 @@
|
||||||
package software.amazon.kinesis.multilang.config;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.time.Duration;
|
|
||||||
import java.util.Arrays;
|
|
||||||
import java.util.Collections;
|
|
||||||
|
|
||||||
import org.junit.jupiter.api.Test;
|
|
||||||
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
|
|
||||||
import software.amazon.awssdk.services.dynamodb.model.Tag;
|
|
||||||
import software.amazon.kinesis.coordinator.CoordinatorConfig.ClientVersionConfig;
|
|
||||||
import software.amazon.kinesis.multilang.MultiLangDaemonConfig;
|
|
||||||
import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration.ResolvedConfiguration;
|
|
||||||
import software.amazon.kinesis.processor.ShardRecordProcessor;
|
|
||||||
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
|
|
||||||
|
|
||||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
|
||||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
|
||||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
|
||||||
|
|
||||||
public class PropertiesMappingE2ETest {
|
|
||||||
private static final String PROPERTIES_FILE = "multilang.properties";
|
|
||||||
private static final String PROPERTIES_FILE_V3 = "multilangv3.properties";
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testKclV3PropertiesMapping() throws IOException {
|
|
||||||
final MultiLangDaemonConfig config = new MultiLangDaemonConfig(PROPERTIES_FILE);
|
|
||||||
|
|
||||||
final ResolvedConfiguration kclV3Config =
|
|
||||||
config.getMultiLangDaemonConfiguration().resolvedConfiguration(new TestRecordProcessorFactory());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
ClientVersionConfig.CLIENT_VERSION_CONFIG_COMPATIBLE_WITH_2X,
|
|
||||||
kclV3Config.coordinatorConfig.clientVersionConfig());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
"MultiLangTest-CoordinatorState-CustomName",
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().tableName());
|
|
||||||
assertEquals(
|
|
||||||
BillingMode.PROVISIONED,
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().billingMode());
|
|
||||||
assertEquals(
|
|
||||||
1000,
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().readCapacity());
|
|
||||||
assertEquals(
|
|
||||||
500, kclV3Config.coordinatorConfig.coordinatorStateTableConfig().writeCapacity());
|
|
||||||
assertTrue(kclV3Config.coordinatorConfig.coordinatorStateTableConfig().pointInTimeRecoveryEnabled());
|
|
||||||
assertTrue(kclV3Config.coordinatorConfig.coordinatorStateTableConfig().deletionProtectionEnabled());
|
|
||||||
assertEquals(
|
|
||||||
Arrays.asList(
|
|
||||||
Tag.builder().key("csTagK1").value("csTagV1").build(),
|
|
||||||
Tag.builder().key("csTagK2").value("csTagV2").build(),
|
|
||||||
Tag.builder().key("csTagK3").value("csTagV3").build()),
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().tags());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
10000L,
|
|
||||||
kclV3Config.leaseManagementConfig.gracefulLeaseHandoffConfig().gracefulLeaseHandoffTimeoutMillis());
|
|
||||||
assertFalse(
|
|
||||||
kclV3Config.leaseManagementConfig.gracefulLeaseHandoffConfig().isGracefulLeaseHandoffEnabled());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
5000L,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.inMemoryWorkerMetricsCaptureFrequencyMillis());
|
|
||||||
assertEquals(
|
|
||||||
60000L,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsReporterFreqInMillis());
|
|
||||||
assertEquals(
|
|
||||||
50,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.noOfPersistedMetricsPerWorkerMetrics());
|
|
||||||
assertTrue(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.disableWorkerMetrics());
|
|
||||||
assertEquals(
|
|
||||||
10000,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.maxThroughputPerHostKBps());
|
|
||||||
assertEquals(
|
|
||||||
90,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.dampeningPercentage());
|
|
||||||
assertEquals(
|
|
||||||
5,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.reBalanceThresholdPercentage());
|
|
||||||
assertFalse(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.allowThroughputOvershoot());
|
|
||||||
assertEquals(
|
|
||||||
Duration.ofHours(12),
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.staleWorkerMetricsEntryCleanupDuration());
|
|
||||||
assertEquals(
|
|
||||||
5,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.varianceBalancingFrequency());
|
|
||||||
assertEquals(
|
|
||||||
0.18D,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsEMAAlpha());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
"MultiLangTest-WorkerMetrics-CustomName",
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.tableName());
|
|
||||||
assertEquals(
|
|
||||||
BillingMode.PROVISIONED,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.billingMode());
|
|
||||||
assertEquals(
|
|
||||||
250,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.readCapacity());
|
|
||||||
assertEquals(
|
|
||||||
90,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.writeCapacity());
|
|
||||||
assertTrue(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.pointInTimeRecoveryEnabled());
|
|
||||||
assertTrue(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.deletionProtectionEnabled());
|
|
||||||
assertEquals(
|
|
||||||
Arrays.asList(
|
|
||||||
Tag.builder().key("wmTagK1").value("wmTagV1").build(),
|
|
||||||
Tag.builder().key("wmTagK2").value("wmTagV2").build()),
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.tags());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testKclV3PropertiesMappingForDefaultValues() throws IOException {
|
|
||||||
final MultiLangDaemonConfig config = new MultiLangDaemonConfig(PROPERTIES_FILE_V3);
|
|
||||||
|
|
||||||
final ResolvedConfiguration kclV3Config =
|
|
||||||
config.getMultiLangDaemonConfiguration().resolvedConfiguration(new TestRecordProcessorFactory());
|
|
||||||
|
|
||||||
assertEquals(ClientVersionConfig.CLIENT_VERSION_CONFIG_3X, kclV3Config.coordinatorConfig.clientVersionConfig());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
"MultiLangTest-CoordinatorState",
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().tableName());
|
|
||||||
assertEquals(
|
|
||||||
BillingMode.PAY_PER_REQUEST,
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().billingMode());
|
|
||||||
assertFalse(kclV3Config.coordinatorConfig.coordinatorStateTableConfig().pointInTimeRecoveryEnabled());
|
|
||||||
assertFalse(kclV3Config.coordinatorConfig.coordinatorStateTableConfig().deletionProtectionEnabled());
|
|
||||||
assertEquals(
|
|
||||||
Collections.emptyList(),
|
|
||||||
kclV3Config.coordinatorConfig.coordinatorStateTableConfig().tags());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
30_000L,
|
|
||||||
kclV3Config.leaseManagementConfig.gracefulLeaseHandoffConfig().gracefulLeaseHandoffTimeoutMillis());
|
|
||||||
assertTrue(
|
|
||||||
kclV3Config.leaseManagementConfig.gracefulLeaseHandoffConfig().isGracefulLeaseHandoffEnabled());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
1000L,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.inMemoryWorkerMetricsCaptureFrequencyMillis());
|
|
||||||
assertEquals(
|
|
||||||
30000L,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsReporterFreqInMillis());
|
|
||||||
assertEquals(
|
|
||||||
10,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.noOfPersistedMetricsPerWorkerMetrics());
|
|
||||||
assertFalse(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.disableWorkerMetrics());
|
|
||||||
assertEquals(
|
|
||||||
Double.MAX_VALUE,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.maxThroughputPerHostKBps());
|
|
||||||
assertEquals(
|
|
||||||
60,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.dampeningPercentage());
|
|
||||||
assertEquals(
|
|
||||||
10,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.reBalanceThresholdPercentage());
|
|
||||||
assertTrue(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.allowThroughputOvershoot());
|
|
||||||
assertEquals(
|
|
||||||
Duration.ofDays(1),
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.staleWorkerMetricsEntryCleanupDuration());
|
|
||||||
assertEquals(
|
|
||||||
3,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.varianceBalancingFrequency());
|
|
||||||
assertEquals(
|
|
||||||
0.5D,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsEMAAlpha());
|
|
||||||
|
|
||||||
assertEquals(
|
|
||||||
"MultiLangTest-WorkerMetricStats",
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.tableName());
|
|
||||||
assertEquals(
|
|
||||||
BillingMode.PAY_PER_REQUEST,
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.billingMode());
|
|
||||||
assertFalse(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.pointInTimeRecoveryEnabled());
|
|
||||||
assertFalse(kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.deletionProtectionEnabled());
|
|
||||||
assertEquals(
|
|
||||||
Collections.emptyList(),
|
|
||||||
kclV3Config
|
|
||||||
.leaseManagementConfig
|
|
||||||
.workerUtilizationAwareAssignmentConfig()
|
|
||||||
.workerMetricsTableConfig()
|
|
||||||
.tags());
|
|
||||||
}
|
|
||||||
|
|
||||||
private static class TestRecordProcessorFactory implements ShardRecordProcessorFactory {
|
|
||||||
@Override
|
|
||||||
public ShardRecordProcessor shardRecordProcessor() {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@@ -1,68 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2024 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package software.amazon.kinesis.multilang.config;
|
|
||||||
|
|
||||||
import java.util.Optional;
|
|
||||||
|
|
||||||
import org.apache.commons.beanutils.BeanUtilsBean;
|
|
||||||
import org.apache.commons.beanutils.ConvertUtilsBean;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.junit.runner.RunWith;
|
|
||||||
import org.mockito.Mock;
|
|
||||||
import org.mockito.runners.MockitoJUnitRunner;
|
|
||||||
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
|
|
||||||
import software.amazon.kinesis.retrieval.polling.PollingConfig;
|
|
||||||
|
|
||||||
import static org.hamcrest.CoreMatchers.equalTo;
|
|
||||||
import static org.junit.Assert.assertThat;
|
|
||||||
|
|
||||||
@RunWith(MockitoJUnitRunner.class)
|
|
||||||
public class WorkerUtilizationAwareAssignmentConfigBeanTest {
|
|
||||||
|
|
||||||
@Mock
|
|
||||||
private KinesisAsyncClient kinesisAsyncClient;
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testAllPropertiesTransit() {
|
|
||||||
PollingConfigBean pollingConfigBean = new PollingConfigBean();
|
|
||||||
pollingConfigBean.setIdleTimeBetweenReadsInMillis(1000);
|
|
||||||
pollingConfigBean.setMaxGetRecordsThreadPool(20);
|
|
||||||
pollingConfigBean.setMaxRecords(5000);
|
|
||||||
pollingConfigBean.setRetryGetRecordsInSeconds(30);
|
|
||||||
|
|
||||||
ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean();
|
|
||||||
BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean);
|
|
||||||
|
|
||||||
MultiLangDaemonConfiguration multiLangDaemonConfiguration =
|
|
||||||
new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean);
|
|
||||||
multiLangDaemonConfiguration.setStreamName("test-stream");
|
|
||||||
|
|
||||||
PollingConfig pollingConfig = pollingConfigBean.build(kinesisAsyncClient, multiLangDaemonConfiguration);
|
|
||||||
|
|
||||||
assertThat(pollingConfig.kinesisClient(), equalTo(kinesisAsyncClient));
|
|
||||||
assertThat(pollingConfig.streamName(), equalTo(multiLangDaemonConfiguration.getStreamName()));
|
|
||||||
assertThat(
|
|
||||||
pollingConfig.idleTimeBetweenReadsInMillis(),
|
|
||||||
equalTo(pollingConfigBean.getIdleTimeBetweenReadsInMillis()));
|
|
||||||
assertThat(
|
|
||||||
pollingConfig.maxGetRecordsThreadPool(),
|
|
||||||
equalTo(Optional.of(pollingConfigBean.getMaxGetRecordsThreadPool())));
|
|
||||||
assertThat(pollingConfig.maxRecords(), equalTo(pollingConfigBean.getMaxRecords()));
|
|
||||||
assertThat(
|
|
||||||
pollingConfig.retryGetRecordsInSeconds(),
|
|
||||||
equalTo(Optional.of(pollingConfigBean.getRetryGetRecordsInSeconds())));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@@ -1,171 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package software.amazon.kinesis.multilang.messages;
|
|
||||||
|
|
||||||
import java.nio.ByteBuffer;
|
|
||||||
import java.time.Instant;
|
|
||||||
import java.util.Arrays;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.function.Function;
|
|
||||||
import java.util.function.Supplier;
|
|
||||||
|
|
||||||
import org.hamcrest.Description;
|
|
||||||
import org.hamcrest.Matcher;
|
|
||||||
import org.hamcrest.TypeSafeDiagnosingMatcher;
|
|
||||||
import org.junit.Test;
|
|
||||||
import software.amazon.kinesis.retrieval.KinesisClientRecord;
|
|
||||||
|
|
||||||
import static org.hamcrest.CoreMatchers.equalTo;
|
|
||||||
import static org.hamcrest.CoreMatchers.nullValue;
|
|
||||||
import static org.hamcrest.CoreMatchers.sameInstance;
|
|
||||||
import static org.junit.Assert.assertThat;
|
|
||||||
|
|
||||||
public class JsonFriendlyRecordTest {
|
|
||||||
|
|
||||||
private KinesisClientRecord kinesisClientRecord;
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testRecordHandlesNullData() {
|
|
||||||
kinesisClientRecord = defaultRecord().data(null).build();
|
|
||||||
JsonFriendlyRecord jsonFriendlyRecord = JsonFriendlyRecord.fromKinesisClientRecord(kinesisClientRecord);
|
|
||||||
|
|
||||||
assertThat(jsonFriendlyRecord, equivalentTo(kinesisClientRecord));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testRecordHandlesNoByteArrayBuffer() {
|
|
||||||
byte[] expected = new byte[] {1, 2, 3, 4};
|
|
||||||
|
|
||||||
ByteBuffer expectedBuffer = ByteBuffer.allocateDirect(expected.length);
|
|
||||||
|
|
||||||
expectedBuffer.put(expected);
|
|
||||||
expectedBuffer.rewind();
|
|
||||||
|
|
||||||
kinesisClientRecord = defaultRecord().data(expectedBuffer).build();
|
|
||||||
JsonFriendlyRecord jsonFriendlyRecord = JsonFriendlyRecord.fromKinesisClientRecord(kinesisClientRecord);
|
|
||||||
|
|
||||||
expectedBuffer.rewind();
|
|
||||||
assertThat(jsonFriendlyRecord, equivalentTo(kinesisClientRecord));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testRecordHandlesArrayByteBuffer() {
|
|
||||||
ByteBuffer expected = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
|
|
||||||
kinesisClientRecord = defaultRecord().data(expected).build();
|
|
||||||
JsonFriendlyRecord jsonFriendlyRecord = JsonFriendlyRecord.fromKinesisClientRecord(kinesisClientRecord);
|
|
||||||
|
|
||||||
assertThat(jsonFriendlyRecord, equivalentTo(kinesisClientRecord));
|
|
||||||
}
|
|
||||||
|
|
||||||
private static RecordMatcher equivalentTo(KinesisClientRecord expected) {
|
|
||||||
return new RecordMatcher(expected);
|
|
||||||
}
|
|
||||||
|
|
||||||
private static class RecordMatcher extends TypeSafeDiagnosingMatcher<JsonFriendlyRecord> {
|
|
||||||
|
|
||||||
private final KinesisClientRecord expected;
|
|
||||||
private final List<Matcher<?>> matchers;
|
|
||||||
|
|
||||||
private RecordMatcher(KinesisClientRecord expected) {
|
|
||||||
this.matchers = Arrays.asList(
|
|
||||||
new FieldMatcher<>(
|
|
||||||
"approximateArrivalTimestamp",
|
|
||||||
equalTo(expected.approximateArrivalTimestamp().toEpochMilli()),
|
|
||||||
JsonFriendlyRecord::getApproximateArrivalTimestamp),
|
|
||||||
new FieldMatcher<>("partitionKey", expected::partitionKey, JsonFriendlyRecord::getPartitionKey),
|
|
||||||
new FieldMatcher<>(
|
|
||||||
"sequenceNumber", expected::sequenceNumber, JsonFriendlyRecord::getSequenceNumber),
|
|
||||||
new FieldMatcher<>(
|
|
||||||
"subSequenceNumber", expected::subSequenceNumber, JsonFriendlyRecord::getSubSequenceNumber),
|
|
||||||
new FieldMatcher<>("data", dataEquivalentTo(expected.data()), JsonFriendlyRecord::getData));
|
|
||||||
|
|
||||||
this.expected = expected;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
protected boolean matchesSafely(JsonFriendlyRecord item, Description mismatchDescription) {
|
|
||||||
return matchers.stream()
|
|
||||||
.map(m -> {
|
|
||||||
if (!m.matches(item)) {
|
|
||||||
m.describeMismatch(item, mismatchDescription);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
})
|
|
||||||
.reduce((l, r) -> l && r)
|
|
||||||
.orElse(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void describeTo(Description description) {
|
|
||||||
description.appendText("A JsonFriendlyRecord matching ").appendList("(", ", ", ")", matchers);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static Matcher<Object> dataEquivalentTo(ByteBuffer expected) {
|
|
||||||
if (expected == null) {
|
|
||||||
return nullValue();
|
|
||||||
} else {
|
|
||||||
if (expected.hasArray()) {
|
|
||||||
return sameInstance(expected.array());
|
|
||||||
} else {
|
|
||||||
byte[] contents = new byte[expected.limit()];
|
|
||||||
expected.get(contents);
|
|
||||||
return equalTo(contents);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static class FieldMatcher<ItemT, ClassT> extends TypeSafeDiagnosingMatcher<ClassT> {
|
|
||||||
|
|
||||||
final String fieldName;
|
|
||||||
final Matcher<ItemT> matcher;
|
|
||||||
final Function<ClassT, ItemT> extractor;
|
|
||||||
|
|
||||||
private FieldMatcher(String fieldName, Supplier<ItemT> expected, Function<ClassT, ItemT> extractor) {
|
|
||||||
this(fieldName, equalTo(expected.get()), extractor);
|
|
||||||
}
|
|
||||||
|
|
||||||
private FieldMatcher(String fieldName, Matcher<ItemT> matcher, Function<ClassT, ItemT> extractor) {
|
|
||||||
this.fieldName = fieldName;
|
|
||||||
this.matcher = matcher;
|
|
||||||
this.extractor = extractor;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
protected boolean matchesSafely(ClassT item, Description mismatchDescription) {
|
|
||||||
ItemT actual = extractor.apply(item);
|
|
||||||
if (!matcher.matches(actual)) {
|
|
||||||
mismatchDescription.appendText(fieldName).appendText(": ");
|
|
||||||
matcher.describeMismatch(actual, mismatchDescription);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void describeTo(Description description) {
|
|
||||||
description.appendText(fieldName).appendText(": ").appendDescriptionOf(matcher);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private KinesisClientRecord.KinesisClientRecordBuilder defaultRecord() {
|
|
||||||
return KinesisClientRecord.builder()
|
|
||||||
.partitionKey("test-partition")
|
|
||||||
.sequenceNumber("123")
|
|
||||||
.approximateArrivalTimestamp(Instant.now());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@@ -1,84 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package software.amazon.kinesis.multilang.messages;
|
|
||||||
|
|
||||||
import java.nio.ByteBuffer;
|
|
||||||
import java.util.Collections;
|
|
||||||
|
|
||||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
|
||||||
import org.junit.Assert;
|
|
||||||
import org.junit.Test;
|
|
||||||
import software.amazon.kinesis.lifecycle.ShutdownReason;
|
|
||||||
import software.amazon.kinesis.lifecycle.events.InitializationInput;
|
|
||||||
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
|
|
||||||
import software.amazon.kinesis.retrieval.KinesisClientRecord;
|
|
||||||
|
|
||||||
public class MessageTest {
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void toStringTest() {
|
|
||||||
Message[] messages = new Message[] {
|
|
||||||
new CheckpointMessage("1234567890", 0L, null),
|
|
||||||
new InitializeMessage(
|
|
||||||
InitializationInput.builder().shardId("shard-123").build()),
|
|
||||||
new ProcessRecordsMessage(ProcessRecordsInput.builder()
|
|
||||||
.records(Collections.singletonList(KinesisClientRecord.builder()
|
|
||||||
.data(ByteBuffer.wrap("cat".getBytes()))
|
|
||||||
.partitionKey("cat")
|
|
||||||
.sequenceNumber("555")
|
|
||||||
.build()))
|
|
||||||
.build()),
|
|
||||||
new ShutdownMessage(ShutdownReason.LEASE_LOST),
|
|
||||||
new StatusMessage("processRecords"),
|
|
||||||
new InitializeMessage(),
|
|
||||||
new ProcessRecordsMessage(),
|
|
||||||
new ShutdownRequestedMessage(),
|
|
||||||
new LeaseLostMessage(),
|
|
||||||
new ShardEndedMessage(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: fix this
|
|
||||||
for (int i = 0; i < messages.length; i++) {
|
|
||||||
System.out.println(messages[i].toString());
|
|
||||||
Assert.assertTrue(
|
|
||||||
"Each message should contain the action field",
|
|
||||||
messages[i].toString().contains("action"));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hit this constructor
|
|
||||||
KinesisClientRecord defaultJsonFriendlyRecord =
|
|
||||||
KinesisClientRecord.builder().build();
|
|
||||||
Assert.assertNull(defaultJsonFriendlyRecord.partitionKey());
|
|
||||||
Assert.assertNull(defaultJsonFriendlyRecord.data());
|
|
||||||
Assert.assertNull(defaultJsonFriendlyRecord.sequenceNumber());
|
|
||||||
Assert.assertNull(new ShutdownMessage(null).getReason());
|
|
||||||
|
|
||||||
// Hit the bad object mapping path
|
|
||||||
Message withBadMapper = new Message() {}.withObjectMapper(new ObjectMapper() {
|
|
||||||
/**
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
private static final long serialVersionUID = 1L;
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public String writeValueAsString(Object m) throws JsonProcessingException {
|
|
||||||
throw new JsonProcessingException(new Throwable()) {};
|
|
||||||
}
|
|
||||||
});
|
|
||||||
String s = withBadMapper.toString();
|
|
||||||
Assert.assertNotNull(s);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@@ -1,28 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8" ?>
<!--
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d [%thread] %-5level %logger{36} [%mdc{ShardId:-NONE}] - %msg %n</pattern>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="CONSOLE" />
    </root>
</configuration>
|
|
||||||
|
|
@@ -1,175 +0,0 @@
|
||||||
# The script that abides by the multi-language protocol. This script will
|
|
||||||
# be executed by the MultiLangDaemon, which will communicate with this script
|
|
||||||
# over STDIN and STDOUT according to the multi-language protocol.
|
|
||||||
executableName = sample_kclpy_app.py
|
|
||||||
|
|
||||||
# The Stream arn: arn:aws:kinesis:<region>:<account id>:stream/<stream name>
|
|
||||||
# Important: streamArn takes precedence over streamName if both are set
|
|
||||||
streamArn = arn:aws:kinesis:us-east-5:000000000000:stream/kclpysample
|
|
||||||
|
|
||||||
# The name of an Amazon Kinesis stream to process.
|
|
||||||
# Important: streamArn takes precedence over streamName if both are set
|
|
||||||
streamName = kclpysample
|
|
||||||
|
|
||||||
# Used by the KCL as the name of this application. Will be used as the name
|
|
||||||
# of an Amazon DynamoDB table which will store the lease and checkpoint
|
|
||||||
# information for workers with this application name
|
|
||||||
applicationName = MultiLangTest
|
|
||||||
|
|
||||||
# Users can change the credentials provider the KCL will use to retrieve credentials.
|
|
||||||
# Expected key name (case-sensitive):
|
|
||||||
# AwsCredentialsProvider / AwsCredentialsProviderDynamoDB / AwsCredentialsProviderCloudWatch
|
|
||||||
# The DefaultCredentialsProvider checks several other providers, which is
|
|
||||||
# described here:
|
|
||||||
# https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.html
|
|
||||||
AwsCredentialsProvider = DefaultCredentialsProvider
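# If DynamoDB or CloudWatch should use credentials different from Kinesis, the
# service-specific keys listed above can be set as well, for example (illustrative
# values only):
# AwsCredentialsProviderDynamoDB = DefaultCredentialsProvider
# AwsCredentialsProviderCloudWatch = DefaultCredentialsProvider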
|
|
||||||
|
|
||||||
# Appended to the user agent of the KCL. Does not impact the functionality of the
|
|
||||||
# KCL in any other way.
|
|
||||||
processingLanguage = python/3.8
|
|
||||||
|
|
||||||
# Valid options are TRIM_HORIZON or LATEST.
|
|
||||||
# See http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
|
|
||||||
initialPositionInStream = TRIM_HORIZON
|
|
||||||
|
|
||||||
# To specify an initial timestamp from which to start processing records, specify a timestamp value for 'initialPositionInStreamExtended',
# and uncomment the line below with the right timestamp value.
|
|
||||||
# See more from 'Timestamp' under http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
|
|
||||||
#initialPositionInStreamExtended = 1636609142
|
|
||||||
|
|
||||||
# The following properties are also available for configuring the KCL Worker that is created
|
|
||||||
# by the MultiLangDaemon.
|
|
||||||
|
|
||||||
# The KCL defaults to us-east-1
|
|
||||||
regionName = us-east-1
|
|
||||||
|
|
||||||
# Failover time in milliseconds. A worker which does not renew its lease within this time interval
# will be regarded as having problems and its shards will be assigned to other workers.
# For applications that have a large number of shards, this may be set to a higher number to reduce
|
|
||||||
# the number of DynamoDB IOPS required for tracking leases
|
|
||||||
failoverTimeMillis = 10000
|
|
||||||
|
|
||||||
# A worker id that uniquely identifies this worker among all workers using the same applicationName
|
|
||||||
# If this isn't provided a MultiLangDaemon instance will assign a unique workerId to itself.
|
|
||||||
workerId = "workerId"
|
|
||||||
|
|
||||||
# Shard sync interval in milliseconds - i.e. wait this long between shard sync tasks.
|
|
||||||
shardSyncIntervalMillis = 60000
|
|
||||||
|
|
||||||
# Max records to fetch from Kinesis in a single GetRecords call.
|
|
||||||
maxRecords = 10000
|
|
||||||
|
|
||||||
# Idle time between record reads in milliseconds.
|
|
||||||
idleTimeBetweenReadsInMillis = 1000
|
|
||||||
|
|
||||||
# Enables applications to flush/checkpoint (if they have some data "in progress" but don't get new data for a while)
|
|
||||||
callProcessRecordsEvenForEmptyRecordList = false
|
|
||||||
|
|
||||||
# Interval in milliseconds between polling to check for parent shard completion.
|
|
||||||
# Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on
|
|
||||||
# completion of parent shards).
|
|
||||||
parentShardPollIntervalMillis = 10000
|
|
||||||
|
|
||||||
# Clean up leases upon shard completion (don't wait until they expire in Kinesis).
|
|
||||||
# Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try
|
|
||||||
# to delete the ones we don't need any longer.
|
|
||||||
cleanupLeasesUponShardCompletion = true
|
|
||||||
|
|
||||||
# Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures).
|
|
||||||
taskBackoffTimeMillis = 500
|
|
||||||
|
|
||||||
# Buffer metrics for at most this long before publishing to CloudWatch.
|
|
||||||
metricsBufferTimeMillis = 10000
|
|
||||||
|
|
||||||
# Buffer at most this many metrics before publishing to CloudWatch.
|
|
||||||
metricsMaxQueueSize = 10000
|
|
||||||
|
|
||||||
# KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls
|
|
||||||
# to RecordProcessorCheckpointer#checkpoint(String) by default.
|
|
||||||
validateSequenceNumberBeforeCheckpointing = true
|
|
||||||
|
|
||||||
# The maximum number of active threads for the MultiLangDaemon to permit.
|
|
||||||
# If a value is provided then a FixedThreadPool is used with the maximum
|
|
||||||
# active threads set to the provided value. If a non-positive integer or no
|
|
||||||
# value is provided a CachedThreadPool is used.
|
|
||||||
maxActiveThreads = -1
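The executor selection described in the comment above can be summarized by the following sketch; this is only an illustration of the documented behavior, not the daemon's actual factory code.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class ExecutorSketch {
        // Positive value: bounded pool of that size; otherwise an unbounded cached pool.
        static ExecutorService forMaxActiveThreads(int maxActiveThreads) {
            return maxActiveThreads > 0
                    ? Executors.newFixedThreadPool(maxActiveThreads)
                    : Executors.newCachedThreadPool();
        }
    }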
|
|
||||||
|
|
||||||
################### KclV3 configurations ###################
|
|
||||||
# NOTE : These are just test configurations to show how to customize
|
|
||||||
# all possible KCLv3 configurations. They are not necessarily the best
|
|
||||||
# default values to use for production.
|
|
||||||
|
|
||||||
# Coordinator config
|
|
||||||
# Version the KCL needs to operate in. For more details check the KCLv3 migration
|
|
||||||
# documentation. Default is CLIENT_VERSION_CONFIG_3X
|
|
||||||
clientVersionConfig = CLIENT_VERSION_CONFIG_COMPATIBLE_WITH_2x
|
|
||||||
# Configurations to control how the CoordinatorState DDB table is created
|
|
||||||
# Default name is applicationName-CoordinatorState in PAY_PER_REQUEST,
|
|
||||||
# with PITR and deletion protection disabled and no tags
|
|
||||||
coordinatorStateTableName = MultiLangTest-CoordinatorState-CustomName
|
|
||||||
coordinatorStateBillingMode = PROVISIONED
|
|
||||||
coordinatorStateReadCapacity = 1000
|
|
||||||
coordinatorStateWriteCapacity = 500
|
|
||||||
coordinatorStatePointInTimeRecoveryEnabled = true
|
|
||||||
coordinatorStateDeletionProtectionEnabled = true
|
|
||||||
coordinatorStateTags = csTagK1=csTagV1,csTagK2=csTagV2,csTagK3=csTagV3
|
|
||||||
|
|
||||||
# Graceful handoff config - tuning of the shutdown behavior during lease transfers
|
|
||||||
# default values are 30000 and true respectively
|
|
||||||
gracefulLeaseHandoffTimeoutMillis = 10000
|
|
||||||
isGracefulLeaseHandoffEnabled = false
|
|
||||||
|
|
||||||
# WorkerMetricStats table config - control how the DDB table is created
|
|
||||||
# Default name is applicationName-WorkerMetricStats in PAY_PER_REQUEST,
|
|
||||||
# with PITR and deletion protection disabled and no tags
|
|
||||||
workerMetricsTableName = MultiLangTest-WorkerMetrics-CustomName
|
|
||||||
workerMetricsBillingMode = PROVISIONED
|
|
||||||
workerMetricsReadCapacity = 250
|
|
||||||
workerMetricsWriteCapacity = 90
|
|
||||||
workerMetricsPointInTimeRecoveryEnabled = true
|
|
||||||
workerMetricsDeletionProtectionEnabled = true
|
|
||||||
workerMetricsTags = wmTagK1=wmTagV1,wmTagK2=wmTagV2
|
|
||||||
|
|
||||||
# WorkerUtilizationAwareAssignment config - tune the new KCLv3 Lease balancing algorithm
|
|
||||||
#
|
|
||||||
# frequency of capturing worker metrics in memory. Default is 1s
|
|
||||||
inMemoryWorkerMetricsCaptureFrequencyMillis = 5000
|
|
||||||
# frequency of reporting worker metric stats to storage. Default is 30s
|
|
||||||
workerMetricsReporterFreqInMillis = 60000
|
|
||||||
# No. of metricStats that are persisted in WorkerMetricStats ddb table, default is 10
|
|
||||||
noOfPersistedMetricsPerWorkerMetrics = 50
|
|
||||||
# Disable use of worker metrics to balance leases; default is false.
|
|
||||||
# If set to true, the algorithm balances leases based on each worker's processing throughput.
|
|
||||||
disableWorkerMetrics = true
|
|
||||||
# Max throughput per host in KBps, used to limit processing to the given value (here 10000 KBps = 10 MBps).
|
|
||||||
# Default is unlimited.
|
|
||||||
maxThroughputPerHostKBps = 10000
|
|
||||||
# Dampen the load that is rebalanced during lease re-balancing, default is 60%
|
|
||||||
dampeningPercentage = 90
|
|
||||||
# Configures the allowed variance range for worker utilization. The upper
|
|
||||||
# limit is calculated as average * (1 + reBalanceThresholdPercentage/100).
|
|
||||||
# The lower limit is average * (1 - reBalanceThresholdPercentage/100). If
|
|
||||||
# any worker's utilization falls outside this range, lease re-balancing is
|
|
||||||
# triggered. The re-balancing algorithm aims to bring variance within the
|
|
||||||
# specified range. It also avoids thrashing by ensuring the utilization of
|
|
||||||
# the worker receiving the load after re-balancing doesn't exceed the fleet
|
|
||||||
# average. This might result in no re-balancing action even if the utilization is
|
|
||||||
# out of the variance range. The default value is 10, representing +/-10%
|
|
||||||
# variance from the average value.
|
|
||||||
reBalanceThresholdPercentage = 5
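# Illustrative example of the calculation above (numbers are examples only): with
# reBalanceThresholdPercentage = 5 and a fleet-average utilization of 60%, the allowed range is
# 60 * 0.95 = 57% to 60 * 1.05 = 63%; a worker whose utilization falls outside that range
# triggers lease re-balancing.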
|
|
||||||
# Whether at least one lease must be taken from a high utilization worker
|
|
||||||
# during re-balancing when there is no lease assigned to that worker whose
|
|
||||||
# throughput is less than or equal to the minimum throughput that needs to be
|
|
||||||
# moved away from that worker to bring the worker back into the allowed variance.
|
|
||||||
# Default is true.
|
|
||||||
allowThroughputOvershoot = false
|
|
||||||
# Lease assignment is performed every failoverTimeMillis but re-balance will
|
|
||||||
# be attempted only once in 5 times based on the below config. Default is 3.
|
|
||||||
varianceBalancingFrequency = 5
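# Illustrative example based on the values in this file: with failoverTimeMillis = 10000 and
# varianceBalancingFrequency = 5, lease assignment runs every 10 seconds and a variance-based
# re-balance is attempted roughly every 50 seconds.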
|
|
||||||
# Alpha value used for calculating exponential moving average of worker's metricStats.
|
|
||||||
workerMetricsEMAAlpha = 0.18
|
|
||||||
# Duration after which workerMetricStats entry from WorkerMetricStats table will
|
|
||||||
# be cleaned up.
|
|
||||||
# Duration format examples: PT15M (15 mins) PT10H (10 hours) P2D (2 days)
|
|
||||||
# Refer to Duration.parse javadocs for more details
|
|
||||||
staleWorkerMetricsEntryCleanupDuration = PT12H
|
|
||||||
|
|
@ -1,100 +0,0 @@
|
||||||
# The script that abides by the multi-language protocol. This script will
|
|
||||||
# be executed by the MultiLangDaemon, which will communicate with this script
|
|
||||||
# over STDIN and STDOUT according to the multi-language protocol.
|
|
||||||
executableName = sample_kclpy_app.py
|
|
||||||
|
|
||||||
# The stream ARN: arn:aws:kinesis:<region>:<account id>:stream/<stream name>
|
|
||||||
# Important: streamArn takes precedence over streamName if both are set
|
|
||||||
streamArn = arn:aws:kinesis:us-east-5:000000000000:stream/kclpysample
|
|
||||||
|
|
||||||
# The name of an Amazon Kinesis stream to process.
|
|
||||||
# Important: streamArn takes precedence over streamName if both are set
|
|
||||||
streamName = kclpysample
|
|
||||||
|
|
||||||
# Used by the KCL as the name of this application. Will be used as the name
|
|
||||||
# of an Amazon DynamoDB table which will store the lease and checkpoint
|
|
||||||
# information for workers with this application name
|
|
||||||
applicationName = MultiLangTest
|
|
||||||
|
|
||||||
# Users can change the credentials provider the KCL will use to retrieve credentials.
|
|
||||||
# Expected key name (case-sensitive):
|
|
||||||
# AwsCredentialsProvider / AwsCredentialsProviderDynamoDB / AwsCredentialsProviderCloudWatch
|
|
||||||
# The DefaultCredentialsProvider checks several other providers, which is
|
|
||||||
# described here:
|
|
||||||
# https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.html
|
|
||||||
AwsCredentialsProvider = DefaultCredentialsProvider
|
|
||||||
|
|
||||||
# Appended to the user agent of the KCL. Does not impact the functionality of the
|
|
||||||
# KCL in any other way.
|
|
||||||
processingLanguage = python/3.8
|
|
||||||
|
|
||||||
# Valid options are TRIM_HORIZON or LATEST.
|
|
||||||
# See http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
|
|
||||||
initialPositionInStream = TRIM_HORIZON
|
|
||||||
|
|
||||||
# To specify an initial timestamp from which to start processing records, please specify a timestamp value for 'initialPositionInStreamExtended',
|
|
||||||
# and uncomment the line below with the right timestamp value.
|
|
||||||
# See more from 'Timestamp' under http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html#API_GetShardIterator_RequestSyntax
|
|
||||||
#initialPositionInStreamExtended = 1636609142
|
|
||||||
|
|
||||||
# The following properties are also available for configuring the KCL Worker that is created
|
|
||||||
# by the MultiLangDaemon.
|
|
||||||
|
|
||||||
# The KCL defaults to us-east-1
|
|
||||||
regionName = us-east-1
|
|
||||||
|
|
||||||
# Failover time in milliseconds. A worker which does not renew its lease within this time interval
|
|
||||||
# will be regarded as having problems and its shards will be assigned to other workers.
|
|
||||||
# For applications that have a large number of shards, this may be set to a higher number to reduce
|
|
||||||
# the number of DynamoDB IOPS required for tracking leases
|
|
||||||
failoverTimeMillis = 10000
|
|
||||||
|
|
||||||
# A worker id that uniquely identifies this worker among all workers using the same applicationName
|
|
||||||
# If this isn't provided, a MultiLangDaemon instance will assign a unique workerId to itself.
|
|
||||||
workerId = "workerId"
|
|
||||||
|
|
||||||
# Shard sync interval in milliseconds - i.e. wait for this long between shard sync tasks.
|
|
||||||
shardSyncIntervalMillis = 60000
|
|
||||||
|
|
||||||
# Max records to fetch from Kinesis in a single GetRecords call.
|
|
||||||
maxRecords = 10000
|
|
||||||
|
|
||||||
# Idle time between record reads in milliseconds.
|
|
||||||
idleTimeBetweenReadsInMillis = 1000
|
|
||||||
|
|
||||||
# Enables applications to flush/checkpoint (if they have some data "in progress", but don't get new data for a while)
|
|
||||||
callProcessRecordsEvenForEmptyRecordList = false
|
|
||||||
|
|
||||||
# Interval in milliseconds between polling to check for parent shard completion.
|
|
||||||
# Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on
|
|
||||||
# completion of parent shards).
|
|
||||||
parentShardPollIntervalMillis = 10000
|
|
||||||
|
|
||||||
# Cleanup leases upon shards completion (don't wait until they expire in Kinesis).
|
|
||||||
# Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try
|
|
||||||
# to delete the ones we don't need any longer.
|
|
||||||
cleanupLeasesUponShardCompletion = true
|
|
||||||
|
|
||||||
# Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures).
|
|
||||||
taskBackoffTimeMillis = 500
|
|
||||||
|
|
||||||
# Buffer metrics for at most this long before publishing to CloudWatch.
|
|
||||||
metricsBufferTimeMillis = 10000
|
|
||||||
|
|
||||||
# Buffer at most this many metrics before publishing to CloudWatch.
|
|
||||||
metricsMaxQueueSize = 10000
|
|
||||||
|
|
||||||
# KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls
|
|
||||||
# to RecordProcessorCheckpointer#checkpoint(String) by default.
|
|
||||||
validateSequenceNumberBeforeCheckpointing = true
|
|
||||||
|
|
||||||
# The maximum number of active threads for the MultiLangDaemon to permit.
|
|
||||||
# If a value is provided then a FixedThreadPool is used with the maximum
|
|
||||||
# active threads set to the provided value. If a non-positive integer or no
|
|
||||||
# value is provided a CachedThreadPool is used.
|
|
||||||
maxActiveThreads = -1
|
|
||||||
|
|
||||||
################### KclV3 configurations ###################
|
|
||||||
# Coordinator config
|
|
||||||
clientVersionConfig = CLIENT_VERSION_CONFIG_3x
|
|
||||||
## Let all other KCLv3 config use defaults
|
|
||||||
|
|
@ -1,591 +0,0 @@
|
||||||
<!--
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
-->
|
|
||||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
|
||||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
|
||||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/maven-v4_0_0.xsd">
|
|
||||||
<modelVersion>4.0.0</modelVersion>
|
|
||||||
|
|
||||||
<parent>
|
|
||||||
<groupId>software.amazon.kinesis</groupId>
|
|
||||||
<artifactId>amazon-kinesis-client-pom</artifactId>
|
|
||||||
<version>3.0.3</version>
|
|
||||||
</parent>
|
|
||||||
|
|
||||||
<artifactId>amazon-kinesis-client</artifactId>
|
|
||||||
<packaging>jar</packaging>
|
|
||||||
<name>Amazon Kinesis Client Library for Java</name>
|
|
||||||
|
|
||||||
<description>The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data
|
|
||||||
from Amazon Kinesis.
|
|
||||||
</description>
|
|
||||||
<url>https://aws.amazon.com/kinesis</url>
|
|
||||||
|
|
||||||
<scm>
|
|
||||||
<url>https://github.com/awslabs/amazon-kinesis-client.git</url>
|
|
||||||
</scm>
|
|
||||||
|
|
||||||
<licenses>
|
|
||||||
<license>
|
|
||||||
<name>Apache License, Version 2.0</name>
|
|
||||||
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
|
|
||||||
<distribution>repo</distribution>
|
|
||||||
</license>
|
|
||||||
</licenses>
|
|
||||||
|
|
||||||
<properties>
|
|
||||||
<protobuf.version>4.27.5</protobuf.version>
|
|
||||||
<sqlite4java.version>1.0.392</sqlite4java.version>
|
|
||||||
<sqlite4java.native>libsqlite4java</sqlite4java.native>
|
|
||||||
<sqlite4java.libpath>${project.build.directory}/test-lib</sqlite4java.libpath>
|
|
||||||
<slf4j.version>2.0.13</slf4j.version>
|
|
||||||
<gsr.version>1.1.19</gsr.version>
|
|
||||||
<skipITs>true</skipITs>
|
|
||||||
</properties>
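<!-- Note (illustrative, relying on standard Maven property overrides): integration tests are skipped
     by default via the skipITs property above and can be enabled from the command line,
     e.g. "mvn verify -DskipITs=false". -->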
|
|
||||||
|
|
||||||
<dependencies>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>kinesis</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>dynamodb</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<!-- https://mvnrepository.com/artifact/software.amazon.awssdk/dynamodb-enhanced -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>dynamodb-enhanced</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<!-- https://mvnrepository.com/artifact/com.amazonaws/dynamodb-lock-client -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.amazonaws</groupId>
|
|
||||||
<artifactId>dynamodb-lock-client</artifactId>
|
|
||||||
<version>1.3.0</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>cloudwatch</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>netty-nio-client</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>sdk-core</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>aws-core</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>arns</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>regions</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>utils</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>http-client-spi</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>dynamodb-enhanced</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.glue</groupId>
|
|
||||||
<artifactId>schema-registry-serde</artifactId>
|
|
||||||
<version>${gsr.version}</version>
|
|
||||||
<exclusions>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>com.amazonaws</groupId>
|
|
||||||
<artifactId>aws-java-sdk-sts</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
</exclusions>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.glue</groupId>
|
|
||||||
<artifactId>schema-registry-common</artifactId>
|
|
||||||
<version>${gsr.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.google.guava</groupId>
|
|
||||||
<artifactId>guava</artifactId>
|
|
||||||
<version>32.1.1-jre</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.google.protobuf</groupId>
|
|
||||||
<artifactId>protobuf-java</artifactId>
|
|
||||||
<version>${protobuf.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-lang3</artifactId>
|
|
||||||
<version>3.14.0</version>
|
|
||||||
</dependency>
|
|
||||||
<!-- https://mvnrepository.com/artifact/commons-collections/commons-collections -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>commons-collections</groupId>
|
|
||||||
<artifactId>commons-collections</artifactId>
|
|
||||||
<version>3.2.2</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-collections4</artifactId>
|
|
||||||
<version>4.4</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>io.netty</groupId>
|
|
||||||
<artifactId>netty-handler</artifactId>
|
|
||||||
<version>4.1.118.Final</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.google.code.findbugs</groupId>
|
|
||||||
<artifactId>jsr305</artifactId>
|
|
||||||
<version>3.0.2</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.fasterxml.jackson.core</groupId>
|
|
||||||
<artifactId>jackson-databind</artifactId>
|
|
||||||
<version>2.12.7.1</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.reactivestreams</groupId>
|
|
||||||
<artifactId>reactive-streams</artifactId>
|
|
||||||
<version>1.0.4</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>annotations</artifactId>
|
|
||||||
<version>2.25.64</version>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.slf4j</groupId>
|
|
||||||
<artifactId>slf4j-api</artifactId>
|
|
||||||
<version>${slf4j.version}</version>
|
|
||||||
</dependency>
|
|
||||||
<!-- https://mvnrepository.com/artifact/org.jetbrains/annotations -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.jetbrains</groupId>
|
|
||||||
<artifactId>annotations</artifactId>
|
|
||||||
<version>26.0.1</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>io.reactivex.rxjava3</groupId>
|
|
||||||
<artifactId>rxjava</artifactId>
|
|
||||||
<version>3.1.8</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.projectlombok</groupId>
|
|
||||||
<artifactId>lombok</artifactId>
|
|
||||||
<version>1.18.24</version>
|
|
||||||
<scope>provided</scope>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<!-- Test -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>sts</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>software.amazon.awssdk</groupId>
|
|
||||||
<artifactId>auth</artifactId>
|
|
||||||
<version>${awssdk.version}</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<!-- TODO: Migrate all tests to Junit5 -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.junit.jupiter</groupId>
|
|
||||||
<artifactId>junit-jupiter-api</artifactId>
|
|
||||||
<version>5.11.3</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>junit</groupId>
|
|
||||||
<artifactId>junit</artifactId>
|
|
||||||
<version>4.13.2</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<!-- https://mvnrepository.com/artifact/org.junit.jupiter/junit-jupiter-params -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.junit.jupiter</groupId>
|
|
||||||
<artifactId>junit-jupiter-params</artifactId>
|
|
||||||
<version>5.11.3</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<!-- Using older version to be compatible with Java 8 -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.mockito</groupId>
|
|
||||||
<artifactId>mockito-junit-jupiter</artifactId>
|
|
||||||
<version>3.12.4</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.mockito</groupId>
|
|
||||||
<artifactId>mockito-core</artifactId>
|
|
||||||
<version>3.12.4</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.hamcrest</groupId>
|
|
||||||
<artifactId>hamcrest-all</artifactId>
|
|
||||||
<version>1.3</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.hamcrest</groupId>
|
|
||||||
<artifactId>hamcrest-core</artifactId>
|
|
||||||
<version>1.3</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<!-- Using older version to be compatible with Java 8 -->
|
|
||||||
<!-- https://mvnrepository.com/artifact/com.amazonaws/DynamoDBLocal -->
|
|
||||||
<dependency>
|
|
||||||
<groupId>com.amazonaws</groupId>
|
|
||||||
<artifactId>DynamoDBLocal</artifactId>
|
|
||||||
<version>1.25.0</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
<dependency>
|
|
||||||
<groupId>ch.qos.logback</groupId>
|
|
||||||
<artifactId>logback-classic</artifactId>
|
|
||||||
<version>1.3.14</version>
|
|
||||||
<scope>test</scope>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
</dependencies>
|
|
||||||
|
|
||||||
<!--<repositories>-->
|
|
||||||
<!--<repository>-->
|
|
||||||
<!--<id>dynamodblocal</id>-->
|
|
||||||
<!--<name>AWS DynamoDB Local Release Repository</name>-->
|
|
||||||
<!--<url>https://s3-us-west-2.amazonaws.com/dynamodb-local/release</url>-->
|
|
||||||
<!--</repository>-->
|
|
||||||
<!--</repositories>-->
|
|
||||||
|
|
||||||
<developers>
|
|
||||||
<developer>
|
|
||||||
<id>amazonwebservices</id>
|
|
||||||
<organization>Amazon Web Services</organization>
|
|
||||||
<organizationUrl>https://aws.amazon.com</organizationUrl>
|
|
||||||
<roles>
|
|
||||||
<role>developer</role>
|
|
||||||
</roles>
|
|
||||||
</developer>
|
|
||||||
</developers>
|
|
||||||
|
|
||||||
<build>
|
|
||||||
<extensions>
|
|
||||||
<extension>
|
|
||||||
<groupId>kr.motd.maven</groupId>
|
|
||||||
<artifactId>os-maven-plugin</artifactId>
|
|
||||||
<version>1.6.0</version>
|
|
||||||
</extension>
|
|
||||||
</extensions>
|
|
||||||
<pluginManagement>
|
|
||||||
<plugins>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-compiler-plugin</artifactId>
|
|
||||||
<version>3.13.0</version>
|
|
||||||
<configuration>
|
|
||||||
<release>8</release>
|
|
||||||
<encoding>UTF-8</encoding>
|
|
||||||
</configuration>
|
|
||||||
</plugin>
|
|
||||||
</plugins>
|
|
||||||
</pluginManagement>
|
|
||||||
|
|
||||||
<plugins>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.xolstice.maven.plugins</groupId>
|
|
||||||
<artifactId>protobuf-maven-plugin</artifactId>
|
|
||||||
<version>0.6.1</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<goals>
|
|
||||||
<goal>compile</goal>
|
|
||||||
</goals>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
<configuration>
|
|
||||||
<protocArtifact>com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}</protocArtifact>
|
|
||||||
</configuration>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-surefire-plugin</artifactId>
|
|
||||||
<version>3.2.5</version>
|
|
||||||
<configuration>
|
|
||||||
<skipTests>${skip.ut}</skipTests>
|
|
||||||
<skipITs>${skipITs}</skipITs>
|
|
||||||
<excludes>
|
|
||||||
<exclude>**/*IntegrationTest.java</exclude>
|
|
||||||
</excludes>
|
|
||||||
<systemPropertyVariables>
|
|
||||||
<sqlite4java.library.path>${sqlite4java.libpath}</sqlite4java.library.path>
|
|
||||||
<awsProfile>${awsProfile}</awsProfile>
|
|
||||||
</systemPropertyVariables>
|
|
||||||
</configuration>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-failsafe-plugin</artifactId>
|
|
||||||
<version>3.2.5</version>
|
|
||||||
<configuration>
|
|
||||||
<includes>
|
|
||||||
<include>**/*IntegrationTest.java</include>
|
|
||||||
</includes>
|
|
||||||
</configuration>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<goals>
|
|
||||||
<goal>integration-test</goal>
|
|
||||||
<goal>verify</goal>
|
|
||||||
</goals>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-dependency-plugin</artifactId>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>copy</id>
|
|
||||||
<phase>test-compile</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>copy</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<artifactItems>
|
|
||||||
<!-- Mac OS X -->
|
|
||||||
<artifactItem>
|
|
||||||
<groupId>com.almworks.sqlite4java</groupId>
|
|
||||||
<artifactId>${sqlite4java.native}-osx</artifactId>
|
|
||||||
<version>${sqlite4java.version}</version>
|
|
||||||
<type>dylib</type>
|
|
||||||
<overWrite>true</overWrite>
|
|
||||||
<outputDirectory>${sqlite4java.libpath}</outputDirectory>
|
|
||||||
</artifactItem>
|
|
||||||
|
|
||||||
<!-- Linux -->
|
|
||||||
<!-- i386 -->
|
|
||||||
<artifactItem>
|
|
||||||
<groupId>com.almworks.sqlite4java</groupId>
|
|
||||||
<artifactId>${sqlite4java.native}-linux-i386</artifactId>
|
|
||||||
<version>${sqlite4java.version}</version>
|
|
||||||
<type>so</type>
|
|
||||||
<overWrite>true</overWrite>
|
|
||||||
<outputDirectory>${sqlite4java.libpath}</outputDirectory>
|
|
||||||
</artifactItem>
|
|
||||||
|
|
||||||
<!-- amd64 -->
|
|
||||||
<artifactItem>
|
|
||||||
<groupId>com.almworks.sqlite4java</groupId>
|
|
||||||
<artifactId>${sqlite4java.native}-linux-amd64</artifactId>
|
|
||||||
<version>${sqlite4java.version}</version>
|
|
||||||
<type>so</type>
|
|
||||||
<overWrite>true</overWrite>
|
|
||||||
<outputDirectory>${sqlite4java.libpath}</outputDirectory>
|
|
||||||
</artifactItem>
|
|
||||||
|
|
||||||
<!-- Windows -->
|
|
||||||
<!-- x86 -->
|
|
||||||
<artifactItem>
|
|
||||||
<groupId>com.almworks.sqlite4java</groupId>
|
|
||||||
<artifactId>sqlite4java-win32-x86</artifactId>
|
|
||||||
<version>${sqlite4java.version}</version>
|
|
||||||
<type>dll</type>
|
|
||||||
<overWrite>true</overWrite>
|
|
||||||
<outputDirectory>${sqlite4java.libpath}</outputDirectory>
|
|
||||||
</artifactItem>
|
|
||||||
|
|
||||||
<!-- x64 -->
|
|
||||||
<artifactItem>
|
|
||||||
<groupId>com.almworks.sqlite4java</groupId>
|
|
||||||
<artifactId>sqlite4java-win32-x64</artifactId>
|
|
||||||
<version>${sqlite4java.version}</version>
|
|
||||||
<type>dll</type>
|
|
||||||
<overWrite>true</overWrite>
|
|
||||||
<outputDirectory>${sqlite4java.libpath}</outputDirectory>
|
|
||||||
</artifactItem>
|
|
||||||
</artifactItems>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-javadoc-plugin</artifactId>
|
|
||||||
<version>3.7.0</version>
|
|
||||||
<configuration>
|
|
||||||
<excludePackageNames>com.amazonaws.services.kinesis.producer.protobuf</excludePackageNames>
|
|
||||||
</configuration>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>attach-javadocs</id>
|
|
||||||
<goals>
|
|
||||||
<goal>jar</goal>
|
|
||||||
</goals>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-source-plugin</artifactId>
|
|
||||||
<version>3.2.1</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>attach-sources</id>
|
|
||||||
<goals>
|
|
||||||
<goal>jar</goal>
|
|
||||||
</goals>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
|
|
||||||
<!-- Required for generating maven version as a Java class for runtime access -->
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.codehaus.mojo</groupId>
|
|
||||||
<artifactId>templating-maven-plugin</artifactId>
|
|
||||||
<version>1.0.0</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>generate-version-class</id>
|
|
||||||
<goals>
|
|
||||||
<goal>filter-sources</goal>
|
|
||||||
</goals>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-resources-plugin</artifactId>
|
|
||||||
<version>3.3.1</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>copy-dist</id>
|
|
||||||
<phase>prepare-package</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>copy-resources</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<outputDirectory>${project.build.outputDirectory}</outputDirectory>
|
|
||||||
<resources>
|
|
||||||
<resource>
|
|
||||||
<directory>${project.basedir}/target/generated-sources/java-templates/</directory>
|
|
||||||
<filtering>false</filtering>
|
|
||||||
<excludes>
|
|
||||||
</excludes>
|
|
||||||
</resource>
|
|
||||||
</resources>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>com.diffplug.spotless</groupId>
|
|
||||||
<artifactId>spotless-maven-plugin</artifactId>
|
|
||||||
<version>2.30.0</version> <!--last version to support java 8-->
|
|
||||||
<configuration>
|
|
||||||
<java>
|
|
||||||
<palantirJavaFormat />
|
|
||||||
<importOrder>
|
|
||||||
<order>java,,\#</order>
|
|
||||||
</importOrder>
|
|
||||||
</java>
|
|
||||||
</configuration>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<goals>
|
|
||||||
<goal>check</goal>
|
|
||||||
</goals>
|
|
||||||
<phase>compile</phase>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>com.salesforce.servicelibs</groupId>
|
|
||||||
<artifactId>proto-backwards-compatibility</artifactId>
|
|
||||||
<version>1.0.7</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<goals>
|
|
||||||
<goal>backwards-compatibility-check</goal>
|
|
||||||
</goals>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
<plugin>
|
|
||||||
<groupId>org.apache.maven.plugins</groupId>
|
|
||||||
<artifactId>maven-dependency-plugin</artifactId>
|
|
||||||
<version>3.1.2</version>
|
|
||||||
<executions>
|
|
||||||
<execution>
|
|
||||||
<id>analyze-dependencies</id>
|
|
||||||
<phase>verify</phase>
|
|
||||||
<goals>
|
|
||||||
<goal>analyze-only</goal>
|
|
||||||
</goals>
|
|
||||||
<configuration>
|
|
||||||
<failOnWarning>true</failOnWarning>
|
|
||||||
<!-- Ignore Runtime/Provided/Test/System scopes for unused dependency analysis. -->
|
|
||||||
<ignoreNonCompile>true</ignoreNonCompile>
|
|
||||||
</configuration>
|
|
||||||
</execution>
|
|
||||||
</executions>
|
|
||||||
</plugin>
|
|
||||||
</plugins>
|
|
||||||
|
|
||||||
</build>
|
|
||||||
<profiles>
|
|
||||||
<profile>
|
|
||||||
<id>disable-java8-doclint</id>
|
|
||||||
<activation>
|
|
||||||
<jdk>[1.8,)</jdk>
|
|
||||||
</activation>
|
|
||||||
<properties>
|
|
||||||
<doclint>none</doclint>
|
|
||||||
</properties>
|
|
||||||
</profile>
|
|
||||||
</profiles>
|
|
||||||
|
|
||||||
</project>
|
|
||||||
|
|
@ -1,609 +0,0 @@
|
||||||
"""
|
|
||||||
Copyright 2024 Amazon.com, Inc. or its affiliates.
|
|
||||||
Licensed under the Apache License, Version 2.0 (the
|
|
||||||
"License"); you may not use this file except in compliance
|
|
||||||
with the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import time
|
|
||||||
|
|
||||||
from enum import Enum
|
|
||||||
import boto3
|
|
||||||
from botocore.config import Config
|
|
||||||
from botocore.exceptions import ClientError
|
|
||||||
|
|
||||||
# DynamoDB table suffixes
|
|
||||||
DEFAULT_COORDINATOR_STATE_TABLE_SUFFIX = "-CoordinatorState"
|
|
||||||
DEFAULT_WORKER_METRICS_TABLE_SUFFIX = "-WorkerMetricStats"
|
|
||||||
|
|
||||||
# DynamoDB attribute names and values
|
|
||||||
CLIENT_VERSION_ATTR = 'cv'
|
|
||||||
TIMESTAMP_ATTR = 'mts'
|
|
||||||
MODIFIED_BY_ATTR = 'mb'
|
|
||||||
HISTORY_ATTR = 'h'
|
|
||||||
MIGRATION_KEY = "Migration3.0"
|
|
||||||
|
|
||||||
# GSI constants
|
|
||||||
GSI_NAME = 'LeaseOwnerToLeaseKeyIndex'
|
|
||||||
GSI_DELETION_WAIT_TIME_SECONDS = 120
|
|
||||||
|
|
||||||
config = Config(
|
|
||||||
retries = {
|
|
||||||
'max_attempts': 10,
|
|
||||||
'mode': 'standard'
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
class KclClientVersion(Enum):
|
|
||||||
VERSION_2X = "CLIENT_VERSION_2X"
|
|
||||||
UPGRADE_FROM_2X = "CLIENT_VERSION_UPGRADE_FROM_2X"
|
|
||||||
VERSION_3X_WITH_ROLLBACK = "CLIENT_VERSION_3X_WITH_ROLLBACK"
|
|
||||||
VERSION_3X = "CLIENT_VERSION_3X"
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return self.value
|
|
||||||
|
|
||||||
|
|
||||||
def get_time_in_millis():
|
|
||||||
return str(round(time.time() * 1000))
|
|
||||||
|
|
||||||
|
|
||||||
def is_valid_version(version, mode):
|
|
||||||
"""
|
|
||||||
Validate if the given version is valid for the specified mode
|
|
||||||
|
|
||||||
:param version: The KCL client version to validate
|
|
||||||
:param mode: Either 'rollback' or 'rollforward'
|
|
||||||
:return: True if the version is valid for the given mode, False otherwise
|
|
||||||
"""
|
|
||||||
if mode == 'rollback':
|
|
||||||
if version == KclClientVersion.VERSION_2X.value:
|
|
||||||
print("Your KCL application already runs in a mode compatible with KCL 2.x. You can deploy the code with the previous KCL version if you still experience an issue.")
|
|
||||||
return True
|
|
||||||
if version in [KclClientVersion.UPGRADE_FROM_2X.value,
|
|
||||||
KclClientVersion.VERSION_3X_WITH_ROLLBACK.value]:
|
|
||||||
return True
|
|
||||||
if version == KclClientVersion.VERSION_3X.value:
|
|
||||||
print("Cannot roll back the KCL application."
|
|
||||||
" It is not in a state that supports rollback.")
|
|
||||||
return False
|
|
||||||
print("Migration to KCL 3.0 not in progress or application_name / coordinator_state_table_name is incorrect."
|
|
||||||
" Please double check and run again with correct arguments.")
|
|
||||||
return False
|
|
||||||
|
|
||||||
if mode == 'rollforward':
|
|
||||||
if version == KclClientVersion.VERSION_2X.value:
|
|
||||||
return True
|
|
||||||
if version in [KclClientVersion.UPGRADE_FROM_2X.value,
|
|
||||||
KclClientVersion.VERSION_3X_WITH_ROLLBACK.value]:
|
|
||||||
print("Cannot roll-forward application. It is not in a rolled back state.")
|
|
||||||
return False
|
|
||||||
if version == KclClientVersion.VERSION_3X.value:
|
|
||||||
print("Cannot roll-forward the KCL application."
|
|
||||||
" Application has already migrated.")
|
|
||||||
return False
|
|
||||||
print("Cannot roll-forward because migration to KCL 3.0 is not in progress or application_name"
|
|
||||||
" / coordinator_state_table_name is incorrect. Please double check and run again with correct arguments.")
|
|
||||||
return False
|
|
||||||
print(f"Invalid mode: {mode}. Mode must be either 'rollback' or 'rollforward'.")
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def handle_get_item_client_error(e, operation, table_name):
|
|
||||||
"""
|
|
||||||
Handle ClientError exceptions raised by get_item on given DynamoDB table
|
|
||||||
|
|
||||||
:param e: The ClientError exception object
|
|
||||||
:param operation: Rollback or Roll-forward for logging the errors
|
|
||||||
:param table_name: The name of the DynamoDB table where the error occurred
|
|
||||||
"""
|
|
||||||
error_code = e.response['Error']['Code']
|
|
||||||
error_message = e.response['Error']['Message']
|
|
||||||
print(f"{operation} could not be performed.")
|
|
||||||
if error_code == 'ProvisionedThroughputExceededException':
|
|
||||||
print(f"Throughput exceeded even after retries: {error_message}")
|
|
||||||
else:
|
|
||||||
print(f"Unexpected client error occurred: {error_code} - {error_message}")
|
|
||||||
print("Please resolve the issue and run the KclMigrationTool again.")
|
|
||||||
|
|
||||||
|
|
||||||
def table_exists(dynamodb_client, table_name):
|
|
||||||
"""
|
|
||||||
Check if a DynamoDB table exists.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param table_name: Name of the DynamoDB table to check
|
|
||||||
:return: True if the table exists, False otherwise
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
dynamodb_client.describe_table(TableName=table_name)
|
|
||||||
return True
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
|
||||||
print(f"Table '{table_name}' does not exist.")
|
|
||||||
return False
|
|
||||||
print(f"An error occurred while checking table '{table_name}': {e}.")
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def validate_tables(dynamodb_client, operation, coordinator_state_table_name, lease_table_name=None):
|
|
||||||
"""
|
|
||||||
Validate the existence of DynamoDB tables required for KCL operations
|
|
||||||
|
|
||||||
:param dynamodb_client: A boto3 DynamoDB client object
|
|
||||||
:param operation: Rollback or Roll-forward for logging
|
|
||||||
:param coordinator_state_table_name: Name of the coordinator state table
|
|
||||||
:param lease_table_name: Name of the DynamoDB KCL lease table (optional)
|
|
||||||
:return: True if all required tables exist, False otherwise
|
|
||||||
"""
|
|
||||||
if lease_table_name and not table_exists(dynamodb_client, lease_table_name):
|
|
||||||
print(
|
|
||||||
f"{operation} failed. Could not find a KCL Application DDB lease table "
|
|
||||||
f"with name {lease_table_name}. Please pass in the correct application_name "
|
|
||||||
"and/or lease_table_name that matches your KCL application configuration."
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
if not table_exists(dynamodb_client, coordinator_state_table_name):
|
|
||||||
print(
|
|
||||||
f"{operation} failed. Could not find a coordinator state table "
|
|
||||||
f"{coordinator_state_table_name}. Please pass in the correct application_name or"
|
|
||||||
f" coordinator_state_table_name that matches your KCL application configuration."
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def add_current_state_to_history(item, max_history=10):
|
|
||||||
"""
|
|
||||||
Adds the current state of a DynamoDB item to its history attribute.
|
|
||||||
Creates a new history entry from the current value and maintains a capped history list.
|
|
||||||
|
|
||||||
:param item: DynamoDB item to add history to
|
|
||||||
:param max_history: Maximum number of history entries to maintain (default: 10)
|
|
||||||
:return: Updated history attribute as a DynamoDB-formatted dictionary
|
|
||||||
"""
|
|
||||||
# Extract current values
|
|
||||||
current_version = item.get(CLIENT_VERSION_ATTR, {}).get('S', 'Unknown')
|
|
||||||
current_modified_by = item.get(MODIFIED_BY_ATTR, {}).get('S', 'Unknown')
|
|
||||||
current_time_in_millis = (
|
|
||||||
item.get(TIMESTAMP_ATTR, {}).get('N', get_time_in_millis())
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create new history entry
|
|
||||||
new_entry = {
|
|
||||||
'M': {
|
|
||||||
CLIENT_VERSION_ATTR: {'S': current_version},
|
|
||||||
MODIFIED_BY_ATTR: {'S': current_modified_by},
|
|
||||||
TIMESTAMP_ATTR: {'N': current_time_in_millis}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Get existing history or create new if doesn't exist
|
|
||||||
history_dict = item.get(HISTORY_ATTR, {'L': []})
|
|
||||||
history_list = history_dict['L']
|
|
||||||
|
|
||||||
# Add new entry to the beginning of the list, capping at max_history
|
|
||||||
history_list.insert(0, new_entry)
|
|
||||||
history_list = history_list[:max_history]
|
|
||||||
|
|
||||||
return history_dict
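
# Illustrative shape of the returned history attribute (DynamoDB JSON; values below are examples only):
#   {'L': [{'M': {'cv': {'S': 'CLIENT_VERSION_3X_WITH_ROLLBACK'},
#                 'mb': {'S': 'KclMigrationTool-rollback'},
#                 'mts': {'N': '1700000000000'}}}]}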
|
|
||||||
|
|
||||||
|
|
||||||
def get_current_state(dynamodb_client, table_name):
|
|
||||||
"""
|
|
||||||
Retrieve the current state from the DynamoDB table and prepare history update.
|
|
||||||
Fetches the current item from the specified DynamoDB table,
|
|
||||||
extracts the initial client version, and creates a new history entry.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param table_name: Name of the DynamoDB table to query
|
|
||||||
:return: A tuple containing:
|
|
||||||
- initial_version (str): The current client version, or 'Unknown' if not found
|
|
||||||
- new_history (dict): Updated history including the current state
|
|
||||||
"""
|
|
||||||
response = dynamodb_client.get_item(
|
|
||||||
TableName=table_name,
|
|
||||||
Key={'key': {'S': MIGRATION_KEY}}
|
|
||||||
)
|
|
||||||
item = response.get('Item', {})
|
|
||||||
initial_version = item.get(CLIENT_VERSION_ATTR, {}).get('S', 'Unknown')
|
|
||||||
new_history = add_current_state_to_history(item)
|
|
||||||
return initial_version, new_history
|
|
||||||
|
|
||||||
|
|
||||||
def rollback_client_version(dynamodb_client, table_name, history):
|
|
||||||
"""
|
|
||||||
Update the client version in the coordinator state table to initiate rollback.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param table_name: Name of the coordinator state DDB table
|
|
||||||
:param history: Updated history attribute as a DynamoDB-formatted dictionary
|
|
||||||
:return: A tuple containing:
|
|
||||||
- success (bool): True if client version was successfully updated, False otherwise
|
|
||||||
- previous_version (str): The version that was replaced, or None if update failed
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
print(f"Rolling back client version in table '{table_name}'...")
|
|
||||||
update_response = dynamodb_client.update_item(
|
|
||||||
TableName=table_name,
|
|
||||||
Key={'key': {'S': MIGRATION_KEY}},
|
|
||||||
UpdateExpression=(
|
|
||||||
f"SET {CLIENT_VERSION_ATTR} = :rollback_client_version, "
|
|
||||||
f"{TIMESTAMP_ATTR} = :updated_at, "
|
|
||||||
f"{MODIFIED_BY_ATTR} = :modifier, "
|
|
||||||
f"{HISTORY_ATTR} = :history"
|
|
||||||
),
|
|
||||||
ConditionExpression=(
|
|
||||||
f"{CLIENT_VERSION_ATTR} IN ("
|
|
||||||
":upgrade_from_2x_client_version, "
|
|
||||||
":3x_with_rollback_client_version)"
|
|
||||||
),
|
|
||||||
ExpressionAttributeValues={
|
|
||||||
':rollback_client_version': {'S': KclClientVersion.VERSION_2X.value},
|
|
||||||
':updated_at': {'N': get_time_in_millis()},
|
|
||||||
':modifier': {'S': 'KclMigrationTool-rollback'},
|
|
||||||
':history': history,
|
|
||||||
':upgrade_from_2x_client_version': (
|
|
||||||
{'S': KclClientVersion.UPGRADE_FROM_2X.value}
|
|
||||||
),
|
|
||||||
':3x_with_rollback_client_version': (
|
|
||||||
{'S': KclClientVersion.VERSION_3X_WITH_ROLLBACK.value}
|
|
||||||
),
|
|
||||||
},
|
|
||||||
ReturnValues='UPDATED_OLD'
|
|
||||||
)
|
|
||||||
replaced_item = update_response.get('Attributes', {})
|
|
||||||
replaced_version = replaced_item.get(CLIENT_VERSION_ATTR, {}).get('S', '')
|
|
||||||
return True, replaced_version
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
|
|
||||||
print("Unable to rollback, as application is not in a state that allows rollback."
|
|
||||||
"Ensure that the given application_name or coordinator_state_table_name is correct and"
|
|
||||||
" you have followed all prior migration steps.")
|
|
||||||
else:
|
|
||||||
print(f"An unexpected error occurred while rolling back: {str(e)}"
|
|
||||||
"Please resolve and run this migration script again.")
|
|
||||||
return False, None
|
|
||||||
|
|
||||||
|
|
||||||
def rollforward_client_version(dynamodb_client, table_name, history):
|
|
||||||
"""
|
|
||||||
Update the client version in the coordinator state table to initiate roll-forward
|
|
||||||
conditionally if application is currently in rolled back state.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param table_name: Name of the coordinator state DDB table
|
|
||||||
:param history: Updated history attribute as a DynamoDB-formatted dictionary
|
|
||||||
:return: True if client version was successfully updated, False otherwise
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
# Conditionally update client version
|
|
||||||
dynamodb_client.update_item(
|
|
||||||
TableName=table_name,
|
|
||||||
Key={'key': {'S': MIGRATION_KEY}},
|
|
||||||
UpdateExpression=(
|
|
||||||
f"SET {CLIENT_VERSION_ATTR} = :rollforward_version, "
|
|
||||||
f"{TIMESTAMP_ATTR} = :updated_at, "
|
|
||||||
f"{MODIFIED_BY_ATTR} = :modifier, "
|
|
||||||
f"{HISTORY_ATTR} = :new_history"
|
|
||||||
),
|
|
||||||
ConditionExpression=f"{CLIENT_VERSION_ATTR} = :kcl_2x_version",
|
|
||||||
ExpressionAttributeValues={
|
|
||||||
':rollforward_version': {'S': KclClientVersion.UPGRADE_FROM_2X.value},
|
|
||||||
':updated_at': {'N': get_time_in_millis()},
|
|
||||||
':modifier': {'S': 'KclMigrationTool-rollforward'},
|
|
||||||
':new_history': history,
|
|
||||||
':kcl_2x_version': {'S': KclClientVersion.VERSION_2X.value},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
print("Roll-forward has been initiated. KCL application will monitor for 3.0 readiness and"
|
|
||||||
" automatically switch to 3.0 functionality when readiness criteria have been met.")
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
|
|
||||||
print("Unable to roll-forward because application is not in rolled back state."
|
|
||||||
" Ensure that the given application_name or coordinator_state_table_name is correct"
|
|
||||||
" and you have followed all prior migration steps.")
|
|
||||||
else:
|
|
||||||
print(f"Unable to roll-forward due to error: {str(e)}. "
|
|
||||||
"Please resolve and run this migration script again.")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Unable to roll-forward due to error: {str(e)}. "
|
|
||||||
"Please resolve and run this migration script again.")
|
|
||||||
|
|
||||||
|
|
||||||
def delete_gsi_if_exists(dynamodb_client, table_name):
|
|
||||||
"""
|
|
||||||
Deletes GSI on given lease table if it exists.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param table_name: Name of lease table to remove GSI from
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
gsi_present = False
|
|
||||||
response = dynamodb_client.describe_table(TableName=table_name)
|
|
||||||
if 'GlobalSecondaryIndexes' in response['Table']:
|
|
||||||
gsi_list = response['Table']['GlobalSecondaryIndexes']
|
|
||||||
for gsi in gsi_list:
|
|
||||||
if gsi['IndexName'] == GSI_NAME:
|
|
||||||
gsi_present = True
|
|
||||||
break
|
|
||||||
|
|
||||||
if not gsi_present:
|
|
||||||
print(f"GSI {GSI_NAME} is not present on lease table {table_name}. It may already be successfully"
|
|
||||||
" deleted. Or if lease table name is incorrect, please re-run the KclMigrationTool with correct"
|
|
||||||
" application_name or lease_table_name.")
|
|
||||||
return
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
|
||||||
print(f"Lease table {table_name} does not exist, please check application_name or lease_table_name"
|
|
||||||
" configuration and try again.")
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
print(f"An unexpected error occurred while checking if GSI {GSI_NAME} exists"
|
|
||||||
f" on lease table {table_name}: {str(e)}. Please rectify the error and try again.")
|
|
||||||
return
|
|
||||||
|
|
||||||
print(f"Deleting GSI '{GSI_NAME}' from table '{table_name}'...")
|
|
||||||
try:
|
|
||||||
dynamodb_client.update_table(
|
|
||||||
TableName=table_name,
|
|
||||||
GlobalSecondaryIndexUpdates=[
|
|
||||||
{
|
|
||||||
'Delete': {
|
|
||||||
'IndexName': GSI_NAME
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
|
||||||
print(f"{GSI_NAME} not found or table '{table_name}' not found.")
|
|
||||||
elif e.response['Error']['Code'] == 'ResourceInUseException':
|
|
||||||
print(f"Unable to delete GSI: '{table_name}' is currently being modified.")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"An unexpected error occurred while deleting GSI {GSI_NAME} on lease table {table_name}: {str(e)}."
|
|
||||||
" Please manually confirm the GSI is removed from the lease table, or"
|
|
||||||
" resolve the error and rerun the migration script.")
|
|
||||||
|
|
||||||
|
|
||||||
def delete_worker_metrics_table_if_exists(dynamodb_client, worker_metrics_table_name):
|
|
||||||
"""
|
|
||||||
Deletes worker metrics table based on application name, if it exists.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param worker_metrics_table_name: Name of the DynamoDB worker metrics table
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
dynamodb_client.describe_table(TableName=worker_metrics_table_name)
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
|
||||||
print(f"Worker metrics table {worker_metrics_table_name} does not exist."
|
|
||||||
" It may already be successfully deleted. Please check that the application_name"
|
|
||||||
" or worker_metrics_table_name is correct. If not, correct this and rerun the migration script.")
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
print(f"An unexpected error occurred when checking if {worker_metrics_table_name} table exists: {str(e)}."
|
|
||||||
" Please manually confirm the table is deleted, or resolve the error"
|
|
||||||
" and rerun the migration script.")
|
|
||||||
return
|
|
||||||
|
|
||||||
print(f"Deleting worker metrics table {worker_metrics_table_name}...")
|
|
||||||
try:
|
|
||||||
dynamodb_client.delete_table(TableName=worker_metrics_table_name)
|
|
||||||
except ClientError as e:
|
|
||||||
if e.response['Error']['Code'] == 'AccessDeniedException':
|
|
||||||
print(f"No permissions to delete table {worker_metrics_table_name}. Please manually delete it if you"
|
|
||||||
" want to avoid any charges until you are ready to rollforward with migration.")
|
|
||||||
else:
|
|
||||||
print(f"An unexpected client error occurred while deleting worker metrics table: {str(e)}."
|
|
||||||
" Please manually confirm the table is deleted, or resolve the error"
|
|
||||||
" and rerun the migration script.")
|
|
||||||
except Exception as e:
|
|
||||||
print(f"An unexpected error occurred while deleting worker metrics table: {str(e)}."
|
|
||||||
" Please manually confirm the table is deleted, or resolve the error"
|
|
||||||
" and rerun the migration script.")
|
|
||||||
|
|
||||||
|
|
||||||
def perform_rollback(dynamodb_client, lease_table_name, coordinator_state_table_name, worker_metrics_table_name):
|
|
||||||
"""
|
|
||||||
Perform KCL 3.0 migration rollback by updating MigrationState for the KCL application.
|
|
||||||
Rolls client version back, removes GSI from lease table, deletes worker metrics table.
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param lease_table_name: Name of the DynamoDB KCL lease table
|
|
||||||
:param coordinator_state_table_name: Name of the DynamoDB coordinator state table
|
|
||||||
:param worker_metrics_table_name: Name of the DynamoDB worker metrics table
|
|
||||||
"""
|
|
||||||
if not validate_tables(dynamodb_client, "Rollback", coordinator_state_table_name, lease_table_name):
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
|
||||||
initial_version, new_history = get_current_state(dynamodb_client,
|
|
||||||
coordinator_state_table_name)
|
|
||||||
except ClientError as e:
|
|
||||||
handle_get_item_client_error(e, "Rollback", coordinator_state_table_name)
|
|
||||||
return
|
|
||||||
|
|
||||||
if not is_valid_version(version=initial_version, mode='rollback'):
|
|
||||||
return
|
|
||||||
|
|
||||||
# 1. Rollback client version
|
|
||||||
if initial_version != KclClientVersion.VERSION_2X.value:
|
|
||||||
rollback_succeeded, initial_version = rollback_client_version(
|
|
||||||
dynamodb_client, coordinator_state_table_name, new_history
|
|
||||||
)
|
|
||||||
if not rollback_succeeded:
|
|
||||||
return
|
|
||||||
|
|
||||||
print(f"Waiting for {GSI_DELETION_WAIT_TIME_SECONDS} seconds before cleaning up KCL 3.0 resources after rollback...")
|
|
||||||
time.sleep(GSI_DELETION_WAIT_TIME_SECONDS)
|
|
||||||
|
|
||||||
# 2. Delete the GSI
|
|
||||||
delete_gsi_if_exists(dynamodb_client, lease_table_name)
|
|
||||||
|
|
||||||
# 3. Delete worker metrics table
|
|
||||||
delete_worker_metrics_table_if_exists(dynamodb_client, worker_metrics_table_name)
|
|
||||||
|
|
||||||
# Log success
|
|
||||||
if initial_version == KclClientVersion.UPGRADE_FROM_2X.value:
|
|
||||||
print("\nRollback completed. Your application was running 2x compatible functionality.")
|
|
||||||
print("Please rollback to your previous application binaries by deploying the code with your previous KCL version.")
|
|
||||||
elif initial_version == KclClientVersion.VERSION_3X_WITH_ROLLBACK.value:
|
|
||||||
print("\nRollback completed. Your KCL Application was running 3x functionality and will rollback to 2x compatible functionality.")
|
|
||||||
print("If you don't see mitigation after a short period of time,"
|
|
||||||
" please rollback to your previous application binaries by deploying the code with your previous KCL version.")
|
|
||||||
elif initial_version == KclClientVersion.VERSION_2X.value:
|
|
||||||
print("\nApplication was already rolled back. Any KCLv3 resources that could be deleted were cleaned up"
|
|
||||||
" to avoid charges until the application can be rolled forward with migration.")
|
|
||||||
|
|
||||||
|
|
||||||
def perform_rollforward(dynamodb_client, coordinator_state_table_name):
|
|
||||||
"""
|
|
||||||
Perform KCL 3.0 migration roll-forward by updating MigrationState for the KCL application
|
|
||||||
|
|
||||||
:param dynamodb_client: Boto3 DynamoDB client
|
|
||||||
:param coordinator_state_table_name: Name of the DynamoDB table
|
|
||||||
"""
|
|
||||||
if not validate_tables(dynamodb_client, "Roll-forward", coordinator_state_table_name):
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
|
||||||
initial_version, new_history = get_current_state(dynamodb_client,
|
|
||||||
coordinator_state_table_name)
|
|
||||||
except ClientError as e:
|
|
||||||
handle_get_item_client_error(e, "Roll-forward", coordinator_state_table_name)
|
|
||||||
return
|
|
||||||
|
|
||||||
if not is_valid_version(version=initial_version, mode='rollforward'):
|
|
||||||
return
|
|
||||||
|
|
||||||
rollforward_client_version(dynamodb_client, coordinator_state_table_name, new_history)
|
|
||||||
|
|
||||||
|
|
||||||
def run_kcl_migration(mode, lease_table_name, coordinator_state_table_name, worker_metrics_table_name):
|
|
||||||
"""
|
|
||||||
Update the MigrationState in CoordinatorState DDB Table
|
|
||||||
|
|
||||||
:param mode: Either 'rollback' or 'rollforward'
|
|
||||||
:param lease_table_name: Name of the DynamoDB KCL lease table
|
|
||||||
:param coordinator_state_table_name: Name of the DynamoDB coordinator state table
|
|
||||||
:param worker_metrics_table_name: Name of the DynamoDB worker metrics table
|
|
||||||
"""
|
|
||||||
dynamodb_client = boto3.client('dynamodb', config=config)
|
|
||||||
|
|
||||||
if mode == "rollback":
|
|
||||||
perform_rollback(
|
|
||||||
dynamodb_client,
|
|
||||||
lease_table_name,
|
|
||||||
coordinator_state_table_name,
|
|
||||||
worker_metrics_table_name
|
|
||||||
)
|
|
||||||
elif mode == "rollforward":
|
|
||||||
perform_rollforward(dynamodb_client, coordinator_state_table_name)
|
|
||||||
else:
|
|
||||||
print(f"Invalid mode: {mode}. Please use 'rollback' or 'rollforward'.")
|
|
||||||
|
|
||||||
|
|
||||||
def validate_args(args):
|
|
||||||
if args.mode == 'rollforward':
|
|
||||||
if not (args.application_name or args.coordinator_state_table_name):
|
|
||||||
raise ValueError(
|
|
||||||
"For rollforward mode, either application_name or "
|
|
||||||
"coordinator_state_table_name must be provided."
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
if args.application_name:
|
|
||||||
return
|
|
||||||
|
|
||||||
if not (args.lease_table_name and
|
|
||||||
args.coordinator_state_table_name and
|
|
||||||
args.worker_metrics_table_name):
|
|
||||||
raise ValueError(
|
|
||||||
"For rollback mode, either application_name or all three table names "
|
|
||||||
"(lease_table_name, coordinator_state_table_name, and "
|
|
||||||
"worker_metrics_table_name) must be provided."
|
|
||||||
)
|
|
||||||
|
|
||||||
def process_table_names(args):
|
|
||||||
"""
|
|
||||||
Process command line arguments to determine table names based on mode.
|
|
||||||
Args:
|
|
||||||
args: Parsed command line arguments
|
|
||||||
Returns:
|
|
||||||
tuple: (mode, lease_table_name, coordinator_state_table_name, worker_metrics_table_name)
|
|
||||||
"""
|
|
||||||
mode_input = args.mode
|
|
||||||
application_name_input = args.application_name
|
|
||||||
|
|
||||||
coordinator_state_table_name_input = (args.coordinator_state_table_name or
|
|
||||||
application_name_input + DEFAULT_COORDINATOR_STATE_TABLE_SUFFIX)
|
|
||||||
lease_table_name_input = None
|
|
||||||
worker_metrics_table_name_input = None
|
|
||||||
|
|
||||||
if mode_input == "rollback":
|
|
||||||
lease_table_name_input = args.lease_table_name or application_name_input
|
|
||||||
worker_metrics_table_name_input = (args.worker_metrics_table_name or
|
|
||||||
application_name_input + DEFAULT_WORKER_METRICS_TABLE_SUFFIX)
|
|
||||||
|
|
||||||
return (mode_input,
|
|
||||||
lease_table_name_input,
|
|
||||||
coordinator_state_table_name_input,
|
|
||||||
worker_metrics_table_name_input)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description=
|
|
||||||
"""
|
|
||||||
KCL Migration Tool
|
|
||||||
This tool facilitates the migration and rollback processes for Amazon KCLv3 applications.
|
|
||||||
|
|
||||||
Before running this tool:
|
|
||||||
1. Ensure you have the necessary AWS permissions configured to access and modify the following:
|
|
||||||
- KCL application DynamoDB tables (lease table and coordinator state table)
|
|
||||||
|
|
||||||
2. Verify that your AWS credentials are properly set up in your environment or AWS config file.
|
|
||||||
|
|
||||||
3. Confirm that you have the correct KCL application name and lease table name (if configured in KCL).
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
This tool supports two main operations: rollforward (upgrade) and rollback.
|
|
||||||
For detailed usage instructions, use the -h or --help option.
|
|
||||||
""",
|
|
||||||
formatter_class=argparse.RawDescriptionHelpFormatter)
|
|
||||||
parser.add_argument("--mode", choices=['rollback', 'rollforward'], required=True,
|
|
||||||
help="Mode of operation: rollback or rollforward")
|
|
||||||
parser.add_argument("--application_name",
|
|
||||||
help="Name of the KCL application. This must match the application name "
|
|
||||||
"used in the KCL Library configurations.")
|
|
||||||
parser.add_argument("--lease_table_name",
|
|
||||||
help="Name of the DynamoDB lease table (defaults to applicationName)."
|
|
||||||
" If LeaseTable name was specified for the application as part of "
|
|
||||||
"the KCL configurations, the same name must be passed here.")
|
|
||||||
parser.add_argument("--coordinator_state_table_name",
|
|
||||||
help="Name of the DynamoDB coordinator state table "
|
|
||||||
"(defaults to applicationName-CoordinatorState)."
|
|
||||||
" If coordinator state table name was specified for the application "
|
|
||||||
"as part of the KCL configurations, the same name must be passed here.")
|
|
||||||
parser.add_argument("--worker_metrics_table_name",
|
|
||||||
help="Name of the DynamoDB worker metrics table "
|
|
||||||
"(defaults to applicationName-WorkerMetricStats)."
|
|
||||||
" If worker metrics table name was specified for the application "
|
|
||||||
"as part of the KCL configurations, the same name must be passed here.")
|
|
||||||
parser.add_argument("--region", required=True,
|
|
||||||
help="AWS Region where your KCL application exists")
|
|
||||||
args = parser.parse_args()
|
|
||||||
validate_args(args)
|
|
||||||
config.region_name = args.region
|
|
||||||
run_kcl_migration(*process_table_names(args))
|
|
||||||
|
|
@ -1,5 +0,0 @@
package software.amazon.kinesis.common;

public final class KinesisClientLibraryPackage {
    public static final String VERSION = "${project.version}";
}
@ -1,27 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.annotations;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

/**
 * Marker interface for 'internal' APIs that should not be used outside the core module.
 * Breaking changes can and will be introduced to elements marked as KinesisClientInternalApi.
 * Users of the KCL should not depend on any packages, types, fields, constructors, or methods with this annotation.
 */
@Retention(RetentionPolicy.CLASS)
public @interface KinesisClientInternalApi {}
@ -1,54 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.checkpoint;

import lombok.Data;
import lombok.experimental.Accessors;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

/**
 * A class encapsulating the 2 pieces of state stored in a checkpoint.
 */
@Data
@Accessors(fluent = true)
public class Checkpoint {
    private final ExtendedSequenceNumber checkpoint;
    private final ExtendedSequenceNumber pendingCheckpoint;
    private final byte[] pendingCheckpointState;

    @Deprecated
    public Checkpoint(final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint) {
        this(checkpoint, pendingCheckpoint, null);
    }

    /**
     * Constructor.
     *
     * @param checkpoint the checkpoint sequence number - cannot be null or empty.
     * @param pendingCheckpoint the pending checkpoint sequence number - can be null.
     * @param pendingCheckpointState the pending checkpoint state - can be null.
     */
    public Checkpoint(
            final ExtendedSequenceNumber checkpoint,
            final ExtendedSequenceNumber pendingCheckpoint,
            byte[] pendingCheckpointState) {
        if (checkpoint == null || checkpoint.sequenceNumber().isEmpty()) {
            throw new IllegalArgumentException("Checkpoint cannot be null or empty");
        }
        this.checkpoint = checkpoint;
        this.pendingCheckpoint = pendingCheckpoint;
        this.pendingCheckpointState = pendingCheckpointState;
    }
}
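A minimal sketch of how the checkpoint state above is typically bundled; the sequence-number literal below is a placeholder, not a value from the original sources:

import software.amazon.kinesis.checkpoint.Checkpoint;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

public class CheckpointExample {
    public static void main(String[] args) {
        // Placeholder sequence number; a real value comes from the lease table or a GetRecords response.
        ExtendedSequenceNumber current =
                new ExtendedSequenceNumber("49590338271490256608559692538361571095921575989136588898");

        // No prepared checkpoint yet, so pendingCheckpoint and pendingCheckpointState stay null.
        Checkpoint checkpoint = new Checkpoint(current, null, null);
        System.out.println(checkpoint.checkpoint().sequenceNumber());
    }
}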
@ -1,27 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.checkpoint;

import software.amazon.kinesis.leases.LeaseCoordinator;
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.processor.Checkpointer;

/**
 *
 */
public interface CheckpointFactory {
    Checkpointer createCheckpointer(LeaseCoordinator leaseCoordinator, LeaseRefresher leaseRefresher);
}
@ -1,190 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.checkpoint;

import java.math.BigInteger;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

import lombok.Data;
import lombok.experimental.Accessors;
import org.apache.commons.lang3.StringUtils;

/**
 * This supports extracting the shardId from a sequence number.
 *
 * <h2>Warning</h2>
 * <strong>Sequence numbers are an opaque value used by Kinesis, and may be changed at any time. Should validation stop
 * working you may need to update your version of the KCL.</strong>
 *
 */
public class SequenceNumberValidator {

    @Data
    @Accessors(fluent = true)
    private static class SequenceNumberComponents {
        final int version;
        final int shardId;
    }

    private interface SequenceNumberReader {
        Optional<SequenceNumberComponents> read(String sequenceNumber);
    }

    /**
     * Reader for the v2 sequence number format. v1 sequence numbers are no longer used or available.
     */
    private static class V2SequenceNumberReader implements SequenceNumberReader {

        private static final int VERSION = 2;

        private static final int EXPECTED_BIT_LENGTH = 186;

        private static final int VERSION_OFFSET = 184;
        private static final long VERSION_MASK = (1 << 4) - 1;

        private static final int SHARD_ID_OFFSET = 4;
        private static final long SHARD_ID_MASK = (1L << 32) - 1;

        @Override
        public Optional<SequenceNumberComponents> read(String sequenceNumberString) {
            BigInteger sequenceNumber = new BigInteger(sequenceNumberString, 10);

            //
            // If the bit length of the sequence number isn't 186 it's impossible for the version numbers
            // to be where we expect them. We treat this the same as an unknown version of the sequence number.
            //
            // If the sequence number length isn't what we expect it's due to a new version of the sequence number or
            // an invalid sequence number.
            //
            if (sequenceNumber.bitLength() != EXPECTED_BIT_LENGTH) {
                return Optional.empty();
            }

            //
            // Read the 4 most significant bits of the sequence number, the 2 most significant bits are implicitly 0
            // (2 == 0b0010). If the version number doesn't match we give up and say we can't parse the sequence number.
            //
            int version = readOffset(sequenceNumber, VERSION_OFFSET, VERSION_MASK);
            if (version != VERSION) {
                return Optional.empty();
            }

            //
            // If we get here the sequence number is big enough, and the version matches so the shardId should be valid.
            //
            int shardId = readOffset(sequenceNumber, SHARD_ID_OFFSET, SHARD_ID_MASK);
            return Optional.of(new SequenceNumberComponents(version, shardId));
        }

        private int readOffset(BigInteger sequenceNumber, int offset, long mask) {
            long value = sequenceNumber.shiftRight(offset).longValue() & mask;
            return (int) value;
        }
    }

    private static final List<SequenceNumberReader> SEQUENCE_NUMBER_READERS =
            Collections.singletonList(new V2SequenceNumberReader());

    private Optional<SequenceNumberComponents> retrieveComponentsFor(String sequenceNumber) {
        return SEQUENCE_NUMBER_READERS.stream()
                .map(r -> r.read(sequenceNumber))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .findFirst();
    }

    /**
     * Attempts to retrieve the version for a sequence number. If no reader can be found for the sequence number this
     * will return an empty Optional.
     *
     * <p>
     * <strong>This will return an empty Optional if it's unable to extract the version number. This can occur for
     * multiple reasons including:
     * <ul>
     * <li>Kinesis has started using a new version of sequence numbers</li>
     * <li>The provided sequence number isn't a valid Kinesis sequence number.</li>
     * </ul>
     * </strong>
     * </p>
     *
     * @param sequenceNumber
     *            the sequence number to extract the version from
     * @return an Optional containing the version if a compatible sequence number reader can be found, an empty Optional
     *         otherwise.
     */
    public Optional<Integer> versionFor(String sequenceNumber) {
        return retrieveComponentsFor(sequenceNumber).map(SequenceNumberComponents::version);
    }

    /**
     * Attempts to retrieve the shardId from a sequence number. If the version of the sequence number is unsupported
     * this will return an empty optional.
     *
     * <strong>This will return an empty Optional if the sequence number isn't recognized. This can occur for multiple
     * reasons including:
     * <ul>
     * <li>Kinesis has started using a new version of sequence numbers</li>
     * <li>The provided sequence number isn't a valid Kinesis sequence number.</li>
     * </ul>
     * </strong>
     * <p>
     * This should always return a value if {@link #versionFor(String)} returns a value
     * </p>
     *
     * @param sequenceNumber
     *            the sequence number to extract the shardId from
     * @return an Optional containing the shardId if the version is supported, an empty Optional otherwise.
     */
    public Optional<String> shardIdFor(String sequenceNumber) {
        return retrieveComponentsFor(sequenceNumber).map(s -> String.format("shardId-%012d", s.shardId()));
    }

    /**
     * Validates that the sequence number provided contains the given shardId. If the sequence number is unsupported
     * this will return an empty Optional.
     *
     * <p>
     * Validation of a sequence number will only occur if the sequence number can be parsed. It's possible to use
     * {@link #versionFor(String)} to verify that the given sequence number is supported by this class. There are 3
     * possible validation states:
     * <dl>
     * <dt>Some(True)</dt>
     * <dd>The sequence number can be parsed, and the shardId matches the one in the sequence number</dd>
     * <dt>Some(False)</dt>
     * <dd>The sequence number can be parsed, and the shardId doesn't match the one in the sequence number</dd>
     * <dt>None</dt>
     * <dd>It wasn't possible to parse the sequence number so the validity of the sequence number is unknown</dd>
     * </dl>
     * </p>
     *
     * <p>
     * <strong>Handling unknown validation cases is application specific, and no specific handling is
     * provided.</strong>
     * </p>
     *
     * @param sequenceNumber
     *            the sequence number to verify the shardId
     * @param shardId
     *            the shardId that the sequence number is expected to contain
     * @return true if the sequence number contains the shardId, false if it doesn't. If the sequence number version is
     *         unsupported this will return an empty Optional
     */
    public Optional<Boolean> validateSequenceNumberForShard(String sequenceNumber, String shardId) {
        return shardIdFor(sequenceNumber).map(s -> StringUtils.equalsIgnoreCase(s, shardId));
    }
}
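For reference, a short sketch of how the validator above is typically exercised; the sequence number and shardId below are placeholders, so the Optionals may well come back empty:

import java.util.Optional;
import software.amazon.kinesis.checkpoint.SequenceNumberValidator;

public class SequenceNumberValidatorExample {
    public static void main(String[] args) {
        SequenceNumberValidator validator = new SequenceNumberValidator();

        // Placeholder values; real ones come from GetRecords responses and the lease table.
        String sequenceNumber = "49590338271490256608559692538361571095921575989136588898";
        String shardId = "shardId-000000000000";

        Optional<Integer> version = validator.versionFor(sequenceNumber);    // empty if the format is unrecognized
        Optional<String> owningShard = validator.shardIdFor(sequenceNumber); // e.g. "shardId-000000000007"
        Optional<Boolean> matches = validator.validateSequenceNumberForShard(sequenceNumber, shardId);

        System.out.printf("version=%s shard=%s matches=%s%n", version, owningShard, matches);
    }
}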
@ -1,36 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.checkpoint.dynamodb;

import lombok.Data;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.checkpoint.CheckpointFactory;
import software.amazon.kinesis.leases.LeaseCoordinator;
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.processor.Checkpointer;

/**
 *
 */
@Data
@KinesisClientInternalApi
public class DynamoDBCheckpointFactory implements CheckpointFactory {
    @Override
    public Checkpointer createCheckpointer(
            final LeaseCoordinator leaseLeaseCoordinator, final LeaseRefresher leaseRefresher) {
        return new DynamoDBCheckpointer(leaseLeaseCoordinator, leaseRefresher);
    }
}
@ -1,181 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.checkpoint.dynamodb;

import java.util.Objects;
import java.util.UUID;

import com.google.common.annotations.VisibleForTesting;
import lombok.NonNull;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.checkpoint.Checkpoint;
import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException;
import software.amazon.kinesis.exceptions.KinesisClientLibException;
import software.amazon.kinesis.exceptions.ShutdownException;
import software.amazon.kinesis.exceptions.ThrottlingException;
import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException;
import software.amazon.kinesis.leases.Lease;
import software.amazon.kinesis.leases.LeaseCoordinator;
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.leases.exceptions.InvalidStateException;
import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
import software.amazon.kinesis.processor.Checkpointer;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

/**
 *
 */
@RequiredArgsConstructor
@Slf4j
@KinesisClientInternalApi
public class DynamoDBCheckpointer implements Checkpointer {
    @NonNull
    private final LeaseCoordinator leaseCoordinator;

    @NonNull
    private final LeaseRefresher leaseRefresher;

    private String operation;

    @Override
    public void setCheckpoint(
            final String leaseKey, final ExtendedSequenceNumber checkpointValue, final String concurrencyToken)
            throws KinesisClientLibException {
        try {
            boolean wasSuccessful = setCheckpoint(leaseKey, checkpointValue, UUID.fromString(concurrencyToken));
            if (!wasSuccessful) {
                throw new ShutdownException("Can't update checkpoint - instance doesn't hold the lease for this shard");
            }
        } catch (ProvisionedThroughputException e) {
            throw new ThrottlingException("Got throttled while updating checkpoint.", e);
        } catch (InvalidStateException e) {
            String message = "Unable to save checkpoint for shardId " + leaseKey;
            log.error(message, e);
            throw new software.amazon.kinesis.exceptions.InvalidStateException(message, e);
        } catch (DependencyException e) {
            throw new KinesisClientLibDependencyException("Unable to save checkpoint for shardId " + leaseKey, e);
        }
    }

    @Override
    public ExtendedSequenceNumber getCheckpoint(final String leaseKey) throws KinesisClientLibException {
        try {
            return leaseRefresher.getLease(leaseKey).checkpoint();
        } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
            String message = "Unable to fetch checkpoint for shardId " + leaseKey;
            log.error(message, e);
            throw new KinesisClientLibIOException(message, e);
        }
    }

    @Override
    public Checkpoint getCheckpointObject(final String leaseKey) throws KinesisClientLibException {
        try {
            Lease lease = leaseRefresher.getLease(leaseKey);
            log.debug("[{}] Retrieved lease => {}", leaseKey, lease);
            return new Checkpoint(lease.checkpoint(), lease.pendingCheckpoint(), lease.pendingCheckpointState());
        } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
            String message = "Unable to fetch checkpoint for shardId " + leaseKey;
            log.error(message, e);
            throw new KinesisClientLibIOException(message, e);
        }
    }

    @Override
    public void prepareCheckpoint(
            final String leaseKey, final ExtendedSequenceNumber pendingCheckpoint, final String concurrencyToken)
            throws KinesisClientLibException {
        prepareCheckpoint(leaseKey, pendingCheckpoint, concurrencyToken, null);
    }

    @Override
    public void prepareCheckpoint(
            String leaseKey,
            ExtendedSequenceNumber pendingCheckpoint,
            String concurrencyToken,
            byte[] pendingCheckpointState)
            throws KinesisClientLibException {
        try {
            boolean wasSuccessful = prepareCheckpoint(
                    leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState);
            if (!wasSuccessful) {
                throw new ShutdownException(
                        "Can't prepare checkpoint - instance doesn't hold the lease for this shard");
            }
        } catch (ProvisionedThroughputException e) {
            throw new ThrottlingException("Got throttled while preparing checkpoint.", e);
        } catch (InvalidStateException e) {
            String message = "Unable to prepare checkpoint for shardId " + leaseKey;
            log.error(message, e);
            throw new software.amazon.kinesis.exceptions.InvalidStateException(message, e);
        } catch (DependencyException e) {
            throw new KinesisClientLibDependencyException("Unable to prepare checkpoint for shardId " + leaseKey, e);
        }
    }

    @VisibleForTesting
    public boolean setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpoint, UUID concurrencyToken)
            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        Lease lease = leaseCoordinator.getCurrentlyHeldLease(leaseKey);
        if (lease == null) {
            log.info(
                    "Worker {} could not update checkpoint for shard {} because it does not hold the lease",
                    leaseCoordinator.workerIdentifier(),
                    leaseKey);
            return false;
        }

        lease.checkpoint(checkpoint);
        lease.pendingCheckpoint(null);
        lease.pendingCheckpointState(null);
        lease.ownerSwitchesSinceCheckpoint(0L);

        return leaseCoordinator.updateLease(lease, concurrencyToken, operation, leaseKey);
    }

    boolean prepareCheckpoint(
            String leaseKey,
            ExtendedSequenceNumber pendingCheckpoint,
            UUID concurrencyToken,
            byte[] pendingCheckpointState)
            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        Lease lease = leaseCoordinator.getCurrentlyHeldLease(leaseKey);
        if (lease == null) {
            log.info(
                    "Worker {} could not prepare checkpoint for shard {} because it does not hold the lease",
                    leaseCoordinator.workerIdentifier(),
                    leaseKey);
            return false;
        }

        lease.pendingCheckpoint(Objects.requireNonNull(pendingCheckpoint, "pendingCheckpoint should not be null"));
        lease.pendingCheckpointState(pendingCheckpointState);
        return leaseCoordinator.updateLease(lease, concurrencyToken, operation, leaseKey);
    }

    @Override
    public void operation(@NonNull final String operation) {
        this.operation = operation;
    }

    @Override
    public String operation() {
        return operation;
    }
}
@ -1,32 +0,0 @@
package software.amazon.kinesis.common;

import lombok.NonNull;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

import static software.amazon.awssdk.services.kinesis.KinesisAsyncClient.SERVICE_NAME;

@KinesisClientInternalApi
public final class ArnUtil {
    private static final String STREAM_RESOURCE_PREFIX = "stream/";

    /**
     * Construct a Kinesis stream ARN.
     *
     * @param region The region the stream exists in.
     * @param accountId The account the stream belongs to.
     * @param streamName The name of the stream.
     * @return The {@link Arn} of the Kinesis stream.
     */
    public static Arn constructStreamArn(
            @NonNull final Region region, @NonNull final String accountId, @NonNull final String streamName) {
        return Arn.builder()
                .partition(region.metadata().partition().id())
                .service(SERVICE_NAME)
                .region(region.id())
                .accountId(accountId)
                .resource(STREAM_RESOURCE_PREFIX + streamName)
                .build();
    }
}
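A quick sketch of the helper above; the account id and stream name are placeholders:

import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;
import software.amazon.kinesis.common.ArnUtil;

public class ArnUtilExample {
    public static void main(String[] args) {
        // Placeholder account id and stream name.
        Arn streamArn = ArnUtil.constructStreamArn(Region.US_EAST_1, "123456789012", "my-stream");
        // Expected shape: arn:aws:kinesis:us-east-1:123456789012:stream/my-stream
        System.out.println(streamArn);
    }
}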
@ -1,29 +0,0 @@
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

public class CommonCalculations {

    /**
     * Convenience method for calculating renewer intervals in milliseconds.
     *
     * @param leaseDurationMillis Duration of a lease
     * @param epsilonMillis Allow for some variance when calculating lease expirations
     */
    public static long getRenewerTakerIntervalMillis(long leaseDurationMillis, long epsilonMillis) {
        return leaseDurationMillis / 3 - epsilonMillis;
    }
}
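The interval is simply a third of the lease duration minus the epsilon. With a 10 000 ms lease duration and a 25 ms epsilon (illustrative values, not taken from this file) that works out to 10000 / 3 - 25 = 3308 ms:

import software.amazon.kinesis.common.CommonCalculations;

public class RenewerIntervalExample {
    public static void main(String[] args) {
        // 10000 / 3 = 3333 (integer division), minus 25 -> 3308 ms between renewer/taker runs.
        long intervalMillis = CommonCalculations.getRenewerTakerIntervalMillis(10_000L, 25L);
        System.out.println(intervalMillis); // 3308
    }
}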
@ -1,298 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.function.Function;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.Accessors;
import org.apache.commons.lang3.StringUtils;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.checkpoint.CheckpointConfig;
import software.amazon.kinesis.coordinator.CoordinatorConfig;
import software.amazon.kinesis.leases.LeaseManagementConfig;
import software.amazon.kinesis.lifecycle.LifecycleConfig;
import software.amazon.kinesis.metrics.MetricsConfig;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.ProcessorConfig;
import software.amazon.kinesis.processor.ShardRecordProcessorFactory;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;
import software.amazon.kinesis.retrieval.RetrievalConfig;

/**
 * This Builder is useful to create all configurations for the KCL with default values.
 */
@Getter
@Setter
@ToString
@EqualsAndHashCode
@Accessors(fluent = true)
public class ConfigsBuilder {
    /**
     * Either the name of the stream to consume records from
     * Or MultiStreamTracker for all the streams to consume records from
     *
     * @deprecated Both single- and multi-stream support is now provided by {@link StreamTracker}.
     * @see #streamTracker
     */
    @Deprecated
    private Either<MultiStreamTracker, String> appStreamTracker;

    /**
     * Stream(s) to be consumed by this KCL application.
     */
    private StreamTracker streamTracker;

    /**
     * Application name for the KCL Worker
     */
    @NonNull
    private final String applicationName;
    /**
     * KinesisClient to be used to consume records from Kinesis
     */
    @NonNull
    private final KinesisAsyncClient kinesisClient;
    /**
     * DynamoDBClient to be used to interact with DynamoDB service for lease management and checkpointing
     */
    @NonNull
    private final DynamoDbAsyncClient dynamoDBClient;
    /**
     * CloudWatchClient to be used to push KCL metrics to CloudWatch service
     */
    @NonNull
    private final CloudWatchAsyncClient cloudWatchClient;
    /**
     * KCL worker identifier to distinguish between 2 unique workers
     */
    @NonNull
    private final String workerIdentifier;
    /**
     * ShardRecordProcessorFactory to be used to create ShardRecordProcessor for processing records
     */
    @NonNull
    private final ShardRecordProcessorFactory shardRecordProcessorFactory;

    /**
     * Lease table name used for lease management and checkpointing.
     */
    private String tableName;

    /**
     * Lease table name used for lease management and checkpointing.
     *
     * @return DynamoDB table name
     */
    public String tableName() {
        if (StringUtils.isEmpty(tableName)) {
            tableName = applicationName();
        }
        return tableName;
    }

    /**
     * CloudWatch namespace for KCL metrics.
     */
    private String namespace;

    /**
     * CloudWatch namespace for KCL metrics.
     *
     * @return CloudWatch namespace
     */
    public String namespace() {
        if (StringUtils.isEmpty(namespace)) {
            namespace = applicationName();
        }
        return namespace;
    }

    /**
     * Constructor to initialize ConfigsBuilder for a single stream identified by name.
     *
     * @param streamName
     * @param applicationName
     * @param kinesisClient
     * @param dynamoDBClient
     * @param cloudWatchClient
     * @param workerIdentifier
     * @param shardRecordProcessorFactory
     */
    public ConfigsBuilder(
            @NonNull String streamName,
            @NonNull String applicationName,
            @NonNull KinesisAsyncClient kinesisClient,
            @NonNull DynamoDbAsyncClient dynamoDBClient,
            @NonNull CloudWatchAsyncClient cloudWatchClient,
            @NonNull String workerIdentifier,
            @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) {
        this(
                new SingleStreamTracker(streamName),
                applicationName,
                kinesisClient,
                dynamoDBClient,
                cloudWatchClient,
                workerIdentifier,
                shardRecordProcessorFactory);
    }

    /**
     * Constructor to initialize ConfigsBuilder for a single stream identified by {@link Arn}.
     *
     * @param streamArn
     * @param applicationName
     * @param kinesisClient
     * @param dynamoDBClient
     * @param cloudWatchClient
     * @param workerIdentifier
     * @param shardRecordProcessorFactory
     */
    public ConfigsBuilder(
            @NonNull Arn streamArn,
            @NonNull String applicationName,
            @NonNull KinesisAsyncClient kinesisClient,
            @NonNull DynamoDbAsyncClient dynamoDBClient,
            @NonNull CloudWatchAsyncClient cloudWatchClient,
            @NonNull String workerIdentifier,
            @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) {
        this(
                new SingleStreamTracker(streamArn),
                applicationName,
                kinesisClient,
                dynamoDBClient,
                cloudWatchClient,
                workerIdentifier,
                shardRecordProcessorFactory);
    }

    /**
     * Constructor to initialize ConfigsBuilder
     *
     * @param streamTracker tracker for single- or multi-stream processing
     * @param applicationName
     * @param kinesisClient
     * @param dynamoDBClient
     * @param cloudWatchClient
     * @param workerIdentifier
     * @param shardRecordProcessorFactory
     */
    public ConfigsBuilder(
            @NonNull StreamTracker streamTracker,
            @NonNull String applicationName,
            @NonNull KinesisAsyncClient kinesisClient,
            @NonNull DynamoDbAsyncClient dynamoDBClient,
            @NonNull CloudWatchAsyncClient cloudWatchClient,
            @NonNull String workerIdentifier,
            @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) {
        this.applicationName = applicationName;
        this.kinesisClient = kinesisClient;
        this.dynamoDBClient = dynamoDBClient;
        this.cloudWatchClient = cloudWatchClient;
        this.workerIdentifier = workerIdentifier;
        this.shardRecordProcessorFactory = shardRecordProcessorFactory;

        // construct both streamTracker and appStreamTracker
        streamTracker(streamTracker);
    }

    public void appStreamTracker(Either<MultiStreamTracker, String> appStreamTracker) {
        this.appStreamTracker = appStreamTracker;
        streamTracker = appStreamTracker.map(Function.identity(), SingleStreamTracker::new);
    }

    public void streamTracker(StreamTracker streamTracker) {
        this.streamTracker = streamTracker;
        this.appStreamTracker = DeprecationUtils.convert(streamTracker, singleStreamTracker -> singleStreamTracker
                .streamConfigList()
                .get(0)
                .streamIdentifier()
                .streamName());
    }

    /**
     * Creates a new instance of CheckpointConfig
     *
     * @return CheckpointConfig
     */
    public CheckpointConfig checkpointConfig() {
        return new CheckpointConfig();
    }

    /**
     * Creates a new instance of CoordinatorConfig
     *
     * @return CoordinatorConfig
     */
    public CoordinatorConfig coordinatorConfig() {
        return new CoordinatorConfig(applicationName());
    }

    /**
     * Creates a new instance of LeaseManagementConfig
     *
     * @return LeaseManagementConfig
     */
    public LeaseManagementConfig leaseManagementConfig() {
        return new LeaseManagementConfig(
                tableName(), applicationName(), dynamoDBClient(), kinesisClient(), workerIdentifier());
    }

    /**
     * Creates a new instance of LifecycleConfig
     *
     * @return LifecycleConfig
     */
    public LifecycleConfig lifecycleConfig() {
        return new LifecycleConfig();
    }

    /**
     * Creates a new instance of MetricsConfig
     *
     * @return MetricsConfig
     */
    public MetricsConfig metricsConfig() {
        return new MetricsConfig(cloudWatchClient(), namespace());
    }

    /**
     * Creates a new instance of ProcessorConfig
     *
     * @return ProcessorConfig
     */
    public ProcessorConfig processorConfig() {
        return new ProcessorConfig(shardRecordProcessorFactory());
    }

    /**
     * Creates a new instance of RetrievalConfig
     *
     * @return RetrievalConfig
     */
    public RetrievalConfig retrievalConfig() {
        return new RetrievalConfig(kinesisClient(), streamTracker(), applicationName());
    }
}
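For context, a minimal sketch of how ConfigsBuilder is usually wired into a Scheduler (the standard KCL consumer pattern); the stream name, application name, region, and record-processor factory below are placeholders supplied by the application:

import java.util.UUID;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.common.ConfigsBuilder;
import software.amazon.kinesis.common.KinesisClientUtil;
import software.amazon.kinesis.coordinator.Scheduler;

public class ConfigsBuilderExample {
    public static void main(String[] args) {
        Region region = Region.US_WEST_2; // placeholder region
        KinesisAsyncClient kinesisClient = KinesisClientUtil.createKinesisAsyncClient(
                KinesisAsyncClient.builder().region(region));
        DynamoDbAsyncClient dynamoClient = DynamoDbAsyncClient.builder().region(region).build();
        CloudWatchAsyncClient cloudWatchClient = CloudWatchAsyncClient.builder().region(region).build();

        // "my-stream", "my-app", and MyShardRecordProcessorFactory are placeholders for application code.
        ConfigsBuilder configsBuilder = new ConfigsBuilder(
                "my-stream",
                "my-app",
                kinesisClient,
                dynamoClient,
                cloudWatchClient,
                UUID.randomUUID().toString(),
                new MyShardRecordProcessorFactory());

        // Each accessor returns a config object pre-populated with the defaults described above.
        Scheduler scheduler = new Scheduler(
                configsBuilder.checkpointConfig(),
                configsBuilder.coordinatorConfig(),
                configsBuilder.leaseManagementConfig(),
                configsBuilder.lifecycleConfig(),
                configsBuilder.metricsConfig(),
                configsBuilder.processorConfig(),
                configsBuilder.retrievalConfig());
        new Thread(scheduler).start();
    }
}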
@ -1,76 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.common;

import java.util.Collection;
import java.util.Collections;

import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.Accessors;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.awssdk.services.dynamodb.model.Tag;

/**
 * Configurations of a DDB table created by KCL for its internal operations.
 */
@Data
@Accessors(fluent = true)
@NoArgsConstructor
public class DdbTableConfig {

    protected DdbTableConfig(final String applicationName, final String tableSuffix) {
        this.tableName = applicationName + "-" + tableSuffix;
    }

    /**
     * name to use for the DDB table. If null, it will default to
     * applicationName-tableSuffix. If multiple KCL applications
     * run in the same account, a unique tableName must be provided.
     */
    private String tableName;

    /**
     * Billing mode used to create the DDB table.
     */
    private BillingMode billingMode = BillingMode.PAY_PER_REQUEST;

    /**
     * read capacity to provision during DDB table creation,
     * if billing mode is PROVISIONED.
     */
    private long readCapacity;

    /**
     * write capacity to provision during DDB table creation,
     * if billing mode is PROVISIONED.
     */
    private long writeCapacity;

    /**
     * Flag to enable Point in Time Recovery on the DDB table.
     */
    private boolean pointInTimeRecoveryEnabled = false;

    /**
     * Flag to enable deletion protection on the DDB table.
     */
    private boolean deletionProtectionEnabled = false;

    /**
     * Tags to add to the DDB table.
     */
    private Collection<Tag> tags = Collections.emptyList();
}
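A small sketch of overriding the defaults above (on-demand billing, no PITR); the table name is a placeholder, and the fluent chaining relies on Lombok's @Data with @Accessors(fluent = true):

import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.kinesis.common.DdbTableConfig;

public class DdbTableConfigExample {
    public static void main(String[] args) {
        DdbTableConfig tableConfig = new DdbTableConfig();
        tableConfig
                .tableName("my-app-WorkerMetricStats") // placeholder name
                .billingMode(BillingMode.PROVISIONED)
                .readCapacity(10)
                .writeCapacity(10)
                .pointInTimeRecoveryEnabled(true);
        System.out.println(tableConfig);
    }
}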
@ -1,51 +0,0 @@
/*
 * Copyright 2023 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.function.Function;

import software.amazon.awssdk.utils.Either;
import software.amazon.kinesis.processor.MultiStreamTracker;
import software.amazon.kinesis.processor.SingleStreamTracker;
import software.amazon.kinesis.processor.StreamTracker;

/**
 * Utility methods to facilitate deprecated code until that deprecated code
 * can be safely removed.
 */
public final class DeprecationUtils {

    private DeprecationUtils() {
        throw new UnsupportedOperationException("utility class");
    }

    /**
     * Converts a {@link StreamTracker} into the deprecated {@code Either<L, R>} convention.
     *
     * @param streamTracker tracker to convert
     */
    @Deprecated
    public static <R> Either<MultiStreamTracker, R> convert(
            StreamTracker streamTracker, Function<SingleStreamTracker, R> converter) {
        if (streamTracker instanceof MultiStreamTracker) {
            return Either.left((MultiStreamTracker) streamTracker);
        } else if (streamTracker instanceof SingleStreamTracker) {
            return Either.right(converter.apply((SingleStreamTracker) streamTracker));
        } else {
            throw new IllegalArgumentException("Unhandled StreamTracker: " + streamTracker);
        }
    }
}
@ -1,54 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.time.Duration;
import java.time.Instant;

import org.slf4j.Logger;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

import static software.amazon.kinesis.lifecycle.ShardConsumer.MAX_TIME_BETWEEN_REQUEST_RESPONSE;

@KinesisClientInternalApi
public class DiagnosticUtils {

    /**
     * Util for RecordPublisher to measure the event delivery latency of the executor service and take appropriate action.
     * @param resourceIdentifier of the shard that is having delayed delivery
     * @param enqueueTimestamp of the event submitted to the executor service
     * @param log Slf4j Logger from RecordPublisher to log the events
     */
    public static void takeDelayedDeliveryActionIfRequired(
            String resourceIdentifier, Instant enqueueTimestamp, Logger log) {
        final long durationBetweenEnqueueAndAckInMillis =
                Duration.between(enqueueTimestamp, Instant.now()).toMillis();
        if (durationBetweenEnqueueAndAckInMillis > MAX_TIME_BETWEEN_REQUEST_RESPONSE / 3) {
            // The above condition logs the warn msg if the delivery time exceeds 11 seconds.
            log.warn(
                    "{}: Record delivery time to shard consumer is high at {} millis. Check the ExecutorStateEvent logs"
                            + " to see the state of the executor service. Also check if the RecordProcessor's processing "
                            + "time is high. ",
                    resourceIdentifier,
                    durationBetweenEnqueueAndAckInMillis);
        } else if (log.isDebugEnabled()) {
            log.debug(
                    "{}: Record delivery time to shard consumer is {} millis",
                    resourceIdentifier,
                    durationBetweenEnqueueAndAckInMillis);
        }
    }
}
@ -1,48 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.common;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public class FutureUtils {

    public static <T> T resolveOrCancelFuture(Future<T> future, Duration timeout)
            throws ExecutionException, InterruptedException, TimeoutException {
        try {
            return future.get(timeout.toMillis(), TimeUnit.MILLISECONDS);
        } catch (TimeoutException te) {
            future.cancel(true);
            throw te;
        }
    }

    public static <T> T unwrappingFuture(final Supplier<CompletableFuture<T>> supplier) {
        try {
            return supplier.get().join();
        } catch (CompletionException e) {
            if (e.getCause() instanceof RuntimeException) {
                throw (RuntimeException) e.getCause();
            }
            throw e;
        }
    }
}
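A brief sketch of the timeout-and-cancel behaviour of resolveOrCancelFuture; the five-second task and two-second budget are arbitrary illustration values:

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeoutException;
import software.amazon.kinesis.common.FutureUtils;

public class FutureUtilsExample {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> slow = CompletableFuture.supplyAsync(() -> {
            try {
                Thread.sleep(5_000); // simulate a slow dependency
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            return "too late";
        });

        try {
            // Waits at most 2 seconds, then cancels the future instead of leaking it.
            System.out.println(FutureUtils.resolveOrCancelFuture(slow, Duration.ofSeconds(2)));
        } catch (TimeoutException e) {
            System.out.println("timed out, cancelled=" + slow.isCancelled());
        }
    }
}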
@ -1,53 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.time.Duration;

import software.amazon.awssdk.http.Protocol;
import software.amazon.awssdk.http.nio.netty.Http2Configuration;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder;

/**
 * Utility to setup KinesisAsyncClient to be used with KCL.
 */
public class KinesisClientUtil {

    private static int INITIAL_WINDOW_SIZE_BYTES = 512 * 1024; // 512 KB
    private static long HEALTH_CHECK_PING_PERIOD_MILLIS = 60 * 1000;

    /**
     * Creates a client from a builder.
     *
     * @param clientBuilder
     * @return
     */
    public static KinesisAsyncClient createKinesisAsyncClient(KinesisAsyncClientBuilder clientBuilder) {
        return adjustKinesisClientBuilder(clientBuilder).build();
    }

    public static KinesisAsyncClientBuilder adjustKinesisClientBuilder(KinesisAsyncClientBuilder builder) {
        return builder.httpClientBuilder(NettyNioAsyncHttpClient.builder()
                .maxConcurrency(Integer.MAX_VALUE)
                .http2Configuration(Http2Configuration.builder()
                        .initialWindowSize(INITIAL_WINDOW_SIZE_BYTES)
                        .healthCheckPingPeriod(Duration.ofMillis(HEALTH_CHECK_PING_PERIOD_MILLIS))
                        .build())
                .protocol(Protocol.HTTP2));
    }
}
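Typical usage of the utility above when constructing the Kinesis client passed to ConfigsBuilder; the region is a placeholder:

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.kinesis.common.KinesisClientUtil;

public class KinesisClientUtilExample {
    public static void main(String[] args) {
        // Applies the HTTP/2 settings above (512 KB initial window, 60 s health-check pings, unbounded concurrency).
        KinesisAsyncClient client = KinesisClientUtil.createKinesisAsyncClient(
                KinesisAsyncClient.builder().region(Region.US_WEST_2)); // placeholder region
        System.out.println(client.serviceName());
        client.close();
    }
}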
@ -1,73 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import software.amazon.awssdk.awscore.AwsRequest;
import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
import software.amazon.awssdk.core.ApiName;
import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest;
import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest;
import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
import software.amazon.awssdk.services.kinesis.model.ListShardsRequest;
import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.retrieval.RetrievalConfig;

/**
 *
 */
@KinesisClientInternalApi
public class KinesisRequestsBuilder {
    public static ListShardsRequest.Builder listShardsRequestBuilder() {
        return appendUserAgent(ListShardsRequest.builder());
    }

    public static SubscribeToShardRequest.Builder subscribeToShardRequestBuilder() {
        return appendUserAgent(SubscribeToShardRequest.builder());
    }

    public static GetRecordsRequest.Builder getRecordsRequestBuilder() {
        return appendUserAgent(GetRecordsRequest.builder());
    }

    public static GetShardIteratorRequest.Builder getShardIteratorRequestBuilder() {
        return appendUserAgent(GetShardIteratorRequest.builder());
    }

    public static DescribeStreamSummaryRequest.Builder describeStreamSummaryRequestBuilder() {
        return appendUserAgent(DescribeStreamSummaryRequest.builder());
    }

    public static RegisterStreamConsumerRequest.Builder registerStreamConsumerRequestBuilder() {
        return appendUserAgent(RegisterStreamConsumerRequest.builder());
    }

    public static DescribeStreamConsumerRequest.Builder describeStreamConsumerRequestBuilder() {
        return appendUserAgent(DescribeStreamConsumerRequest.builder());
    }

    @SuppressWarnings("unchecked")
    private static <T extends AwsRequest.Builder> T appendUserAgent(final T builder) {
        return (T) builder.overrideConfiguration(AwsRequestOverrideConfiguration.builder()
                .addApiName(ApiName.builder()
                        .name(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT)
                        .version(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT_VERSION)
                        .build())
                .build());
    }
}
@@ -1,41 +0,0 @@
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import lombok.Builder;
import lombok.Getter;
import lombok.experimental.Accessors;

/**
 * Configuration for lease cleanup.
 */
@Builder
@Getter
@Accessors(fluent = true)
public class LeaseCleanupConfig {
    /**
     * Interval at which to run the lease cleanup thread.
     */
    private final long leaseCleanupIntervalMillis;
    /**
     * Interval at which to check if a lease is completed or not.
     */
    private final long completedLeaseCleanupIntervalMillis;
    /**
     * Interval at which to check if a lease is garbage (i.e., trimmed past the stream's retention period) or not.
     */
    private final long garbageLeaseCleanupIntervalMillis;
}
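A hypothetical usage sketch (not part of the diff): assembling a LeaseCleanupConfig with the Lombok-generated builder. The interval values are illustrative assumptions, expressed via Duration for readability.

import java.time.Duration;
import software.amazon.kinesis.common.LeaseCleanupConfig;

public class LeaseCleanupConfigExample {
    public static void main(String[] args) {
        LeaseCleanupConfig config = LeaseCleanupConfig.builder()
                .leaseCleanupIntervalMillis(Duration.ofMinutes(1).toMillis())
                .completedLeaseCleanupIntervalMillis(Duration.ofMinutes(5).toMillis())
                .garbageLeaseCleanupIntervalMillis(Duration.ofMinutes(30).toMillis())
                .build();
        System.out.println(config.leaseCleanupIntervalMillis()); // fluent getter from @Accessors
    }
}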
@@ -1,65 +0,0 @@
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.Optional;

import lombok.experimental.Accessors;

@Accessors(fluent = true)
public class RequestDetails {

    /**
     * Placeholder for logging when no successful request has been made.
     */
    private static final String NONE = "NONE";

    private final Optional<String> requestId;
    private final Optional<String> timestamp;

    public RequestDetails() {
        this.requestId = Optional.empty();
        this.timestamp = Optional.empty();
    }

    public RequestDetails(String requestId, String timestamp) {
        this.requestId = Optional.of(requestId);
        this.timestamp = Optional.of(timestamp);
    }

    /**
     * Gets last successful request's request id.
     *
     * @return requestId associated with last successful request.
     */
    public String getRequestId() {
        return requestId.orElse(NONE);
    }

    /**
     * Gets last successful request's timestamp.
     *
     * @return timestamp associated with last successful request.
     */
    public String getTimestamp() {
        return timestamp.orElse(NONE);
    }

    @Override
    public String toString() {
        return String.format("request id - %s, timestamp - %s", getRequestId(), getTimestamp());
    }
}
@@ -1,27 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.common;

public class StackTraceUtils {
    public static String getPrintableStackTrace(final StackTraceElement[] stackTrace) {
        final StringBuilder stackTraceString = new StringBuilder();

        for (final StackTraceElement traceElement : stackTrace) {
            stackTraceString.append("\tat ").append(traceElement).append("\n");
        }

        return stackTraceString.toString();
    }
}
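A small usage sketch (not part of the diff): rendering the current thread's stack trace with StackTraceUtils, e.g. for inclusion in a log message.

import software.amazon.kinesis.common.StackTraceUtils;

public class StackTraceExample {
    public static void main(String[] args) {
        String printable = StackTraceUtils.getPrintableStackTrace(
                Thread.currentThread().getStackTrace());
        System.out.println("Called from:\n" + printable);
    }
}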
@@ -1,195 +0,0 @@
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.common;

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import lombok.AccessLevel;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.NonNull;
import lombok.experimental.Accessors;
import software.amazon.awssdk.arns.Arn;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.utils.Validate;

@Builder(access = AccessLevel.PRIVATE)
@EqualsAndHashCode
@Getter
@Accessors(fluent = true)
public class StreamIdentifier {

    @Builder.Default
    private final Optional<String> accountIdOptional = Optional.empty();

    @NonNull
    private final String streamName;

    @Builder.Default
    private final Optional<Long> streamCreationEpochOptional = Optional.empty();

    @Builder.Default
    @EqualsAndHashCode.Exclude
    private final Optional<Arn> streamArnOptional = Optional.empty();

    /**
     * Pattern for a serialized {@link StreamIdentifier}. The valid format is
     * {@code <accountId>:<streamName>:<creationEpoch>}.
     */
    private static final Pattern STREAM_IDENTIFIER_PATTERN =
            Pattern.compile("(?<accountId>[0-9]+):(?<streamName>[^:]+):(?<creationEpoch>[0-9]+)");

    /**
     * Pattern for a stream ARN. The valid format is
     * {@code arn:aws:kinesis:<region>:<accountId>:stream/<streamName>}
     * where {@code region} is the id representation of a {@link Region}.
     */
    private static final Pattern STREAM_ARN_PATTERN = Pattern.compile(
            "arn:aws[^:]*:kinesis:(?<region>[-a-z0-9]+):(?<accountId>[0-9]{12}):stream/(?<streamName>.+)");

    /**
     * Serialize the current StreamIdentifier instance.
     *
     * @return a String of {@code account:stream:creationEpoch} in multi-stream mode
     *         or {@link #streamName} in single-stream mode.
     */
    public String serialize() {
        if (!streamCreationEpochOptional.isPresent()) {
            // creation epoch is expected to be empty in single-stream mode
            return streamName;
        }

        final char delimiter = ':';
        final StringBuilder sb = new StringBuilder()
                .append(accountIdOptional.get())
                .append(delimiter)
                .append(streamName)
                .append(delimiter)
                .append(streamCreationEpochOptional.get());
        return sb.toString();
    }

    @Override
    public String toString() {
        return serialize();
    }

    /**
     * Create a multi-stream instance of StreamIdentifier from a serialized stream identifier
     * of format {@link #STREAM_IDENTIFIER_PATTERN}.
     *
     * @param streamIdentifierSer a String of {@code account:stream:creationEpoch}
     * @return StreamIdentifier with {@link #accountIdOptional} and {@link #streamCreationEpochOptional} present
     */
    public static StreamIdentifier multiStreamInstance(String streamIdentifierSer) {
        final Matcher matcher = STREAM_IDENTIFIER_PATTERN.matcher(streamIdentifierSer);
        if (matcher.matches()) {
            final String accountId = matcher.group("accountId");
            final String streamName = matcher.group("streamName");
            final Long creationEpoch = Long.valueOf(matcher.group("creationEpoch"));

            validateCreationEpoch(creationEpoch);

            return StreamIdentifier.builder()
                    .accountIdOptional(Optional.of(accountId))
                    .streamName(streamName)
                    .streamCreationEpochOptional(Optional.of(creationEpoch))
                    .build();
        }

        throw new IllegalArgumentException("Unable to deserialize StreamIdentifier from " + streamIdentifierSer);
    }

    /**
     * Create a multi-stream instance of StreamIdentifier from a stream {@link Arn}.
     *
     * @param streamArn an {@link Arn} of format {@link #STREAM_ARN_PATTERN}
     * @param creationEpoch Creation epoch of the stream. This value will be
     *          reflected in the lease key and is assumed to be correct. (KCL could
     *          verify, but that creates issues for both bootstrapping and, with large
     *          KCL applications, API throttling against DescribeStreamSummary.)
     *          If this epoch is reused for two identically-named streams in the same
     *          account -- such as deleting and recreating a stream -- then KCL will
     *          <b>be unable to differentiate leases between the old and new stream</b>
     *          since the lease keys collide on this creation epoch.
     * @return StreamIdentifier with {@link #accountIdOptional}, {@link #streamCreationEpochOptional},
     *         and {@link #streamArnOptional} present
     */
    public static StreamIdentifier multiStreamInstance(Arn streamArn, long creationEpoch) {
        validateArn(streamArn);
        validateCreationEpoch(creationEpoch);

        return StreamIdentifier.builder()
                .accountIdOptional(streamArn.accountId())
                .streamName(streamArn.resource().resource())
                .streamCreationEpochOptional(Optional.of(creationEpoch))
                .streamArnOptional(Optional.of(streamArn))
                .build();
    }

    /**
     * Create a single-stream instance of StreamIdentifier from a stream name.
     *
     * @param streamName stream name of a Kinesis stream
     */
    public static StreamIdentifier singleStreamInstance(String streamName) {
        Validate.notEmpty(streamName, "StreamName should not be empty");

        return StreamIdentifier.builder().streamName(streamName).build();
    }

    /**
     * Create a single-stream instance of StreamIdentifier from an AWS Kinesis stream {@link Arn}.
     *
     * @param streamArn AWS ARN of a Kinesis stream
     * @return StreamIdentifier with {@link #accountIdOptional} and {@link #streamArnOptional} present
     */
    public static StreamIdentifier singleStreamInstance(Arn streamArn) {
        validateArn(streamArn);

        return StreamIdentifier.builder()
                .accountIdOptional(streamArn.accountId())
                .streamName(streamArn.resource().resource())
                .streamArnOptional(Optional.of(streamArn))
                .build();
    }

    /**
     * Verify that the streamArn follows the appropriate formatting.
     * Throws an exception if it does not.
     * @param streamArn
     */
    public static void validateArn(Arn streamArn) {
        if (!STREAM_ARN_PATTERN.matcher(streamArn.toString()).matches()
                || !streamArn.region().isPresent()) {
            throw new IllegalArgumentException("Invalid streamArn " + streamArn);
        }
    }

    /**
     * Verify that creationEpoch is greater than 0.
     * Throws an exception if it is not.
     * @param creationEpoch
     */
    private static void validateCreationEpoch(long creationEpoch) {
        if (creationEpoch <= 0) {
            throw new IllegalArgumentException("Creation epoch must be > 0; received " + creationEpoch);
        }
    }
}
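A hypothetical usage sketch (not part of the diff): round-tripping a multi-stream StreamIdentifier through serialize() and multiStreamInstance(). The account id, stream name, and creation epoch are made-up values.

import software.amazon.kinesis.common.StreamIdentifier;

public class StreamIdentifierExample {
    public static void main(String[] args) {
        StreamIdentifier original =
                StreamIdentifier.multiStreamInstance("123456789012:my-stream:1680000000");
        String serialized = original.serialize(); // "123456789012:my-stream:1680000000"
        StreamIdentifier roundTripped = StreamIdentifier.multiStreamInstance(serialized);
        System.out.println(original.equals(roundTripped)); // true
    }
}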
@@ -1,157 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import lombok.Data;
import lombok.NonNull;
import lombok.experimental.Accessors;
import software.amazon.kinesis.common.DdbTableConfig;
import software.amazon.kinesis.leases.NoOpShardPrioritization;
import software.amazon.kinesis.leases.ShardPrioritization;

/**
 * Used by the KCL to configure the coordinator.
 */
@Data
@Accessors(fluent = true)
public class CoordinatorConfig {

    private static final int PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT = 1;

    public CoordinatorConfig(final String applicationName) {
        this.applicationName = applicationName;
        this.coordinatorStateTableConfig = new CoordinatorStateTableConfig(applicationName);
    }

    /**
     * Application name used by the checkpointer to checkpoint.
     *
     * @return String
     */
    @NonNull
    private final String applicationName;

    /**
     * The maximum number of attempts to initialize the Scheduler.
     *
     * <p>Default value: 20</p>
     */
    private int maxInitializationAttempts = 20;

    /**
     * Interval in milliseconds between polling to check for parent shard completion.
     * Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on
     * completion of parent shards).
     *
     * <p>Default value: 10000L</p>
     */
    private long parentShardPollIntervalMillis = 10000L;

    /**
     * The Scheduler will skip shard sync during initialization if there are one or more leases in the lease table.
     * This assumes that the shards and leases are in sync. This enables customers to choose faster startup times
     * (e.g. during incremental deployments of an application).
     *
     * <p>Default value: false</p>
     */
    private boolean skipShardSyncAtWorkerInitializationIfLeasesExist = false;

    /**
     * The number of milliseconds between polling of the shard consumer for triggering state changes, and health checks.
     *
     * <p>Default value: 1000 milliseconds</p>
     */
    private long shardConsumerDispatchPollIntervalMillis = 1000L;

    /**
     * Shard prioritization strategy.
     *
     * <p>Default value: {@link NoOpShardPrioritization}</p>
     */
    private ShardPrioritization shardPrioritization = new NoOpShardPrioritization();

    /**
     * WorkerStateChangeListener to be used by the Scheduler.
     *
     * <p>Default value: {@link NoOpWorkerStateChangeListener}</p>
     */
    private WorkerStateChangeListener workerStateChangeListener = new NoOpWorkerStateChangeListener();

    /**
     * GracefulShutdownCoordinator to be used by the Scheduler.
     *
     * <p>Default value: {@link GracefulShutdownCoordinator}</p>
     */
    private GracefulShutdownCoordinator gracefulShutdownCoordinator = new GracefulShutdownCoordinator();

    private CoordinatorFactory coordinatorFactory = new SchedulerCoordinatorFactory();

    /**
     * Interval in milliseconds between retries of scheduler initialization.
     *
     * <p>Default value: 1000L</p>
     */
    private long schedulerInitializationBackoffTimeMillis = 1000L;

    /**
     * Version the KCL needs to operate in. For more details check the KCLv3 migration
     * documentation.
     */
    public enum ClientVersionConfig {
        /**
         * For an application that was operating with a previous KCLv2.x release, a migration process is
         * needed when upgrading to KCLv3.x due to the incompatible changes between the two versions.
         * During the migration process, the application must use
         * ClientVersion=CLIENT_VERSION_COMPATIBLE_WITH_2x so that it runs in a compatible mode until all
         * workers in the fleet have upgraded to the 3.x version (which is determined based on workers
         * emitting WorkerMetricStats). Once all known workers are in 3.x mode, the library auto-toggles to
         * 3.x mode; prior to that it runs in a mode compatible with 2.x workers. This version also allows
         * rolling back to the compatible mode from the auto-toggled 3.x mode.
         */
        CLIENT_VERSION_CONFIG_COMPATIBLE_WITH_2X,
        /**
         * A new application operating with KCLv3.x will use this value. Also, an application that has
         * successfully upgraded to the 3.x version and no longer needs the ability to roll back to a
         * 2.x-compatible version will use this value. In this version, KCL operates with the new
         * algorithms introduced in 3.x, which are not compatible with prior versions. Once in this
         * version, rollback to 2.x is not supported.
         */
        CLIENT_VERSION_CONFIG_3X,
    }

    /**
     * Client version KCL must operate in. By default it operates in the 3.x version, which is not
     * compatible with prior versions.
     */
    private ClientVersionConfig clientVersionConfig = ClientVersionConfig.CLIENT_VERSION_CONFIG_3X;

    public static class CoordinatorStateTableConfig extends DdbTableConfig {
        private CoordinatorStateTableConfig(final String applicationName) {
            super(applicationName, "CoordinatorState");
        }
    }

    /**
     * Configuration to control how the CoordinatorState DDB table is created, such as table name,
     * billing mode, provisioned capacity. If no table name is specified, the table name will
     * default to applicationName-CoordinatorState. If no billing mode is chosen, the default is
     * On-Demand.
     */
    @NonNull
    private final CoordinatorStateTableConfig coordinatorStateTableConfig;
}
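A hypothetical configuration sketch (not part of the diff): tuning a CoordinatorConfig through the fluent setters generated by Lombok's @Data and @Accessors(fluent = true). The application name and values are illustrative assumptions.

import software.amazon.kinesis.coordinator.CoordinatorConfig;
import software.amazon.kinesis.coordinator.CoordinatorConfig.ClientVersionConfig;

public class CoordinatorConfigExample {
    public static void main(String[] args) {
        CoordinatorConfig config = new CoordinatorConfig("my-kcl-app")
                .maxInitializationAttempts(10)
                .parentShardPollIntervalMillis(5_000L)
                // run in the 2.x-compatible mode while a worker fleet upgrades to 3.x
                .clientVersionConfig(ClientVersionConfig.CLIENT_VERSION_CONFIG_COMPATIBLE_WITH_2X);
        System.out.println(config.applicationName());
    }
}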
@@ -1,89 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import java.util.concurrent.ExecutorService;

import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
import software.amazon.kinesis.leases.ShardInfo;
import software.amazon.kinesis.processor.Checkpointer;

/**
 * Used in the process of configuring and providing instances to the {@link Scheduler}.
 */
public interface CoordinatorFactory {
    /**
     * Creates the executor service to be used by the Scheduler.
     *
     * @return ExecutorService
     */
    ExecutorService createExecutorService();

    /**
     * Creates the GracefulShutdownCoordinator to be used by the Scheduler.
     *
     * <h3>Method Deprecated</h3>
     * <p>
     * <strong>Note: This method has been deprecated, and will be removed in a future release. Use the configuration in
     * {@link CoordinatorConfig#gracefulShutdownCoordinator}. Set the
     * {@link CoordinatorConfig#gracefulShutdownCoordinator} to null in order to use this method.</strong>
     * </p>
     * <h4>Resolution Order</h4>
     * <ol>
     * <li>{@link CoordinatorConfig#gracefulShutdownCoordinator()}</li>
     * <li>{@link CoordinatorFactory#createGracefulShutdownCoordinator()}</li>
     * </ol>
     *
     * @return a {@link GracefulShutdownCoordinator} that manages the process of shutting down the scheduler.
     */
    @Deprecated
    default GracefulShutdownCoordinator createGracefulShutdownCoordinator() {
        return new GracefulShutdownCoordinator();
    }

    /**
     * Creates a WorkerStateChangeListener to be used by the Scheduler.
     *
     * <h3>Method Deprecated</h3>
     * <p>
     * <strong>Note: This method has been deprecated, and will be removed in a future release. Use the configuration in
     * {@link CoordinatorConfig#workerStateChangeListener}. Set the
     * {@link CoordinatorConfig#workerStateChangeListener} to null in order to use this method.</strong>
     * </p>
     *
     * <h4>Resolution Order</h4>
     * <ol>
     * <li>{@link CoordinatorConfig#workerStateChangeListener()}</li>
     * <li>{@link CoordinatorFactory#createWorkerStateChangeListener()}</li>
     * </ol>
     *
     * @return a {@link WorkerStateChangeListener} instance that will be notified for specific {@link Scheduler} steps.
     */
    @Deprecated
    default WorkerStateChangeListener createWorkerStateChangeListener() {
        return new NoOpWorkerStateChangeListener();
    }

    /**
     * Creates a RecordProcessorCheckpointer to be used by the Scheduler.
     *
     * @param shardInfo ShardInfo to be used in order to create the ShardRecordProcessorCheckpointer
     * @param checkpoint Checkpointer to be used in order to create the ShardRecordProcessorCheckpointer
     * @return ShardRecordProcessorCheckpointer
     */
    ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer(ShardInfo shardInfo, Checkpointer checkpoint);
}
@@ -1,52 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator;

import java.util.Map;

import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

/**
 * Data model for CoordinatorState. This model is used to store various state information required
 * for coordination across the KCL worker fleet; therefore, it follows a flexible schema.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor(access = AccessLevel.PRIVATE)
@Slf4j
@KinesisClientInternalApi
public class CoordinatorState {
    public static final String COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME = "key";

    /**
     * Key value for the item in the CoordinatorState table used for leader
     * election among the KCL workers. The attributes relevant to this item
     * are dictated by the DDB Lock client implementation that is used to
     * provide mutual exclusion.
     */
    public static final String LEADER_HASH_KEY = "Leader";

    private String key;

    private Map<String, AttributeValue> attributes;
}
@@ -1,427 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDBLockClientOptions;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBLockClientOptions.AmazonDynamoDBLockClientOptionsBuilder;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.MapUtils;
import software.amazon.awssdk.core.waiters.WaiterResponse;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeAction;
import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate;
import software.amazon.awssdk.services.dynamodb.model.BillingMode;
import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException;
import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest;
import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse;
import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest;
import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse;
import software.amazon.awssdk.services.dynamodb.model.DynamoDbException;
import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue;
import software.amazon.awssdk.services.dynamodb.model.GetItemRequest;
import software.amazon.awssdk.services.dynamodb.model.GetItemResponse;
import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement;
import software.amazon.awssdk.services.dynamodb.model.KeyType;
import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput;
import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughputExceededException;
import software.amazon.awssdk.services.dynamodb.model.PutItemRequest;
import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException;
import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;
import software.amazon.awssdk.services.dynamodb.model.TableDescription;
import software.amazon.awssdk.services.dynamodb.model.TableStatus;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest;
import software.amazon.awssdk.services.dynamodb.waiters.DynamoDbAsyncWaiter;
import software.amazon.awssdk.utils.CollectionUtils;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.common.FutureUtils;
import software.amazon.kinesis.coordinator.CoordinatorConfig.CoordinatorStateTableConfig;
import software.amazon.kinesis.coordinator.migration.MigrationState;
import software.amazon.kinesis.leases.DynamoUtils;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.leases.exceptions.InvalidStateException;
import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
import software.amazon.kinesis.utils.DdbUtil;

import static java.util.Objects.nonNull;
import static software.amazon.kinesis.common.FutureUtils.unwrappingFuture;
import static software.amazon.kinesis.coordinator.CoordinatorState.COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME;

/**
 * Data Access Object to abstract accessing {@link CoordinatorState} from
 * the CoordinatorState DDB table.
 */
@Slf4j
@KinesisClientInternalApi
public class CoordinatorStateDAO {
    private final DynamoDbAsyncClient dynamoDbAsyncClient;
    private final DynamoDbClient dynamoDbSyncClient;

    private final CoordinatorStateTableConfig config;

    public CoordinatorStateDAO(
            final DynamoDbAsyncClient dynamoDbAsyncClient, final CoordinatorStateTableConfig config) {
        this.dynamoDbAsyncClient = dynamoDbAsyncClient;
        this.config = config;
        this.dynamoDbSyncClient = createDelegateClient();
    }

    public void initialize() throws DependencyException {
        createTableIfNotExists();
    }

    private DynamoDbClient createDelegateClient() {
        return new DynamoDbAsyncToSyncClientAdapter(dynamoDbAsyncClient);
    }

    public AmazonDynamoDBLockClientOptionsBuilder getDDBLockClientOptionsBuilder() {
        return AmazonDynamoDBLockClientOptions.builder(dynamoDbSyncClient, config.tableName())
                .withPartitionKeyName(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME);
    }

    /**
     * List all the {@link CoordinatorState} from the DDB table synchronously.
     *
     * @throws DependencyException if DynamoDB scan fails in an unexpected way
     * @throws InvalidStateException if ddb table does not exist
     * @throws ProvisionedThroughputException if DynamoDB scan fails due to lack of capacity
     *
     * @return list of state
     */
    public List<CoordinatorState> listCoordinatorState()
            throws ProvisionedThroughputException, DependencyException, InvalidStateException {
        log.debug("Listing coordinatorState");

        final ScanRequest request =
                ScanRequest.builder().tableName(config.tableName()).build();

        try {
            ScanResponse response = FutureUtils.unwrappingFuture(() -> dynamoDbAsyncClient.scan(request));
            final List<CoordinatorState> stateList = new ArrayList<>();
            while (Objects.nonNull(response)) {
                log.debug("Scan response {}", response);

                response.items().stream().map(this::fromDynamoRecord).forEach(stateList::add);
                if (!CollectionUtils.isNullOrEmpty(response.lastEvaluatedKey())) {
                    final ScanRequest continuationRequest = request.toBuilder()
                            .exclusiveStartKey(response.lastEvaluatedKey())
                            .build();
                    log.debug("Scan request {}", continuationRequest);
                    response = FutureUtils.unwrappingFuture(() -> dynamoDbAsyncClient.scan(continuationRequest));
                } else {
                    log.debug("Scan finished");
                    response = null;
                }
            }
            return stateList;
        } catch (final ProvisionedThroughputExceededException e) {
            log.warn(
                    "Provisioned throughput on {} has been exceeded. It is recommended to increase the IOPs"
                            + " on the table.",
                    config.tableName());
            throw new ProvisionedThroughputException(e);
        } catch (final ResourceNotFoundException e) {
            throw new InvalidStateException(
                    String.format("Cannot list coordinatorState, because table %s does not exist", config.tableName()));
        } catch (final DynamoDbException e) {
            throw new DependencyException(e);
        }
    }

    /**
     * Create a new {@link CoordinatorState} if it does not exist.
     * @param state the state to create
     * @return true if state was created, false if it already exists
     *
     * @throws DependencyException if DynamoDB put fails in an unexpected way
     * @throws InvalidStateException if lease table does not exist
     * @throws ProvisionedThroughputException if DynamoDB put fails due to lack of capacity
     */
    public boolean createCoordinatorStateIfNotExists(final CoordinatorState state)
            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        log.debug("Creating coordinatorState {}", state);

        final PutItemRequest request = PutItemRequest.builder()
                .tableName(config.tableName())
                .item(toDynamoRecord(state))
                .expected(getDynamoNonExistentExpectation())
                .build();

        try {
            FutureUtils.unwrappingFuture(() -> dynamoDbAsyncClient.putItem(request));
        } catch (final ConditionalCheckFailedException e) {
            log.info("Not creating coordinator state because the key already exists");
            return false;
        } catch (final ProvisionedThroughputExceededException e) {
            log.warn(
                    "Provisioned throughput on {} has been exceeded. It is recommended to increase the IOPs"
                            + " on the table.",
                    config.tableName());
            throw new ProvisionedThroughputException(e);
        } catch (final ResourceNotFoundException e) {
            throw new InvalidStateException(String.format(
                    "Cannot create coordinatorState %s, because table %s does not exist", state, config.tableName()));
        } catch (final DynamoDbException e) {
            throw new DependencyException(e);
        }

        log.info("Created CoordinatorState: {}", state);
        return true;
    }

    /**
     * @param key Get the CoordinatorState for this key
     *
     * @throws InvalidStateException if ddb table does not exist
     * @throws ProvisionedThroughputException if DynamoDB get fails due to lack of capacity
     * @throws DependencyException if DynamoDB get fails in an unexpected way
     *
     * @return state for the specified key, or null if one doesn't exist
     */
    public CoordinatorState getCoordinatorState(@NonNull final String key)
            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        log.debug("Getting coordinatorState with key {}", key);

        final GetItemRequest request = GetItemRequest.builder()
                .tableName(config.tableName())
                .key(getCoordinatorStateKey(key))
                .consistentRead(true)
                .build();

        try {
            final GetItemResponse result = FutureUtils.unwrappingFuture(() -> dynamoDbAsyncClient.getItem(request));

            final Map<String, AttributeValue> dynamoRecord = result.item();
            if (CollectionUtils.isNullOrEmpty(dynamoRecord)) {
                log.debug("No coordinatorState found with key {}, returning null.", key);
                return null;
            }
            return fromDynamoRecord(dynamoRecord);
        } catch (final ProvisionedThroughputExceededException e) {
            log.warn(
                    "Provisioned throughput on {} has been exceeded. It is recommended to increase the IOPs"
                            + " on the table.",
                    config.tableName());
            throw new ProvisionedThroughputException(e);
        } catch (final ResourceNotFoundException e) {
            throw new InvalidStateException(String.format(
                    "Cannot get coordinatorState for key %s, because table %s does not exist",
                    key, config.tableName()));
        } catch (final DynamoDbException e) {
            throw new DependencyException(e);
        }
    }

    /**
     * Update fields of the given coordinator state in DynamoDB. Conditional on the provided expectation.
     *
     * @return true if update succeeded, false otherwise when expectations are not met
     *
     * @throws InvalidStateException if table does not exist
     * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity
     * @throws DependencyException if DynamoDB update fails in an unexpected way
     */
    public boolean updateCoordinatorStateWithExpectation(
            @NonNull final CoordinatorState state, final Map<String, ExpectedAttributeValue> expectations)
            throws DependencyException, InvalidStateException, ProvisionedThroughputException {
        final Map<String, ExpectedAttributeValue> expectationMap = getDynamoExistentExpectation(state.getKey());
        expectationMap.putAll(MapUtils.emptyIfNull(expectations));

        final Map<String, AttributeValueUpdate> updateMap = getDynamoCoordinatorStateUpdate(state);

        final UpdateItemRequest request = UpdateItemRequest.builder()
                .tableName(config.tableName())
                .key(getCoordinatorStateKey(state.getKey()))
                .expected(expectationMap)
                .attributeUpdates(updateMap)
                .build();

        try {
            FutureUtils.unwrappingFuture(() -> dynamoDbAsyncClient.updateItem(request));
        } catch (final ConditionalCheckFailedException e) {
            log.debug("CoordinatorState update {} failed because conditions were not met", state);
            return false;
        } catch (final ProvisionedThroughputExceededException e) {
            log.warn(
                    "Provisioned throughput on {} has been exceeded. It is recommended to increase the IOPs"
                            + " on the table.",
                    config.tableName());
            throw new ProvisionedThroughputException(e);
        } catch (final ResourceNotFoundException e) {
            throw new InvalidStateException(String.format(
                    "Cannot update coordinatorState for key %s, because table %s does not exist",
                    state.getKey(), config.tableName()));
        } catch (final DynamoDbException e) {
            throw new DependencyException(e);
        }

        log.info("Coordinator state updated {}", state);
        return true;
    }

    private void createTableIfNotExists() throws DependencyException {
        TableDescription tableDescription = getTableDescription();
        if (tableDescription == null) {
            final CreateTableResponse response = unwrappingFuture(() -> dynamoDbAsyncClient.createTable(getRequest()));
            tableDescription = response.tableDescription();
            log.info("DDB Table: {} created", config.tableName());
        } else {
            log.info("Skipping DDB table {} creation as it already exists", config.tableName());
        }

        if (tableDescription.tableStatus() != TableStatus.ACTIVE) {
            log.info("Waiting for DDB Table: {} to become active", config.tableName());
            try (final DynamoDbAsyncWaiter waiter = dynamoDbAsyncClient.waiter()) {
                final WaiterResponse<DescribeTableResponse> response =
                        unwrappingFuture(() -> waiter.waitUntilTableExists(
                                r -> r.tableName(config.tableName()), o -> o.waitTimeout(Duration.ofMinutes(10))));
                response.matched()
                        .response()
                        .orElseThrow(() -> new DependencyException(new IllegalStateException(
                                "Creating CoordinatorState table timed out",
                                response.matched().exception().orElse(null))));
            }
            unwrappingFuture(() -> DdbUtil.pitrEnabler(config, dynamoDbAsyncClient));
        }
    }

    private CreateTableRequest getRequest() {
        final CreateTableRequest.Builder requestBuilder = CreateTableRequest.builder()
                .tableName(config.tableName())
                .keySchema(KeySchemaElement.builder()
                        .attributeName(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME)
                        .keyType(KeyType.HASH)
                        .build())
                .attributeDefinitions(AttributeDefinition.builder()
                        .attributeName(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME)
                        .attributeType(ScalarAttributeType.S)
                        .build())
                .deletionProtectionEnabled(config.deletionProtectionEnabled());

        if (nonNull(config.tags()) && !config.tags().isEmpty()) {
            requestBuilder.tags(config.tags());
        }

        switch (config.billingMode()) {
            case PAY_PER_REQUEST:
                requestBuilder.billingMode(BillingMode.PAY_PER_REQUEST);
                break;
            case PROVISIONED:
                requestBuilder.billingMode(BillingMode.PROVISIONED);

                final ProvisionedThroughput throughput = ProvisionedThroughput.builder()
                        .readCapacityUnits(config.readCapacity())
                        .writeCapacityUnits(config.writeCapacity())
                        .build();
                requestBuilder.provisionedThroughput(throughput);
                break;
        }
        return requestBuilder.build();
    }

    private Map<String, AttributeValue> getCoordinatorStateKey(@NonNull final String key) {
        return Collections.singletonMap(
                COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME, DynamoUtils.createAttributeValue(key));
    }

    private CoordinatorState fromDynamoRecord(final Map<String, AttributeValue> dynamoRecord) {
        final HashMap<String, AttributeValue> attributes = new HashMap<>(dynamoRecord);
        final String keyValue =
                DynamoUtils.safeGetString(attributes.remove(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME));

        final MigrationState migrationState = MigrationState.deserialize(keyValue, attributes);
        if (migrationState != null) {
            log.debug("Retrieved MigrationState {}", migrationState);
            return migrationState;
        }

        final CoordinatorState c =
                CoordinatorState.builder().key(keyValue).attributes(attributes).build();
        log.debug("Retrieved coordinatorState {}", c);

        return c;
    }

    private Map<String, AttributeValue> toDynamoRecord(final CoordinatorState state) {
        final Map<String, AttributeValue> result = new HashMap<>();
        result.put(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME, DynamoUtils.createAttributeValue(state.getKey()));
        if (state instanceof MigrationState) {
            result.putAll(((MigrationState) state).serialize());
        }
        if (!CollectionUtils.isNullOrEmpty(state.getAttributes())) {
            result.putAll(state.getAttributes());
        }
        return result;
    }

    private Map<String, ExpectedAttributeValue> getDynamoNonExistentExpectation() {
        final Map<String, ExpectedAttributeValue> result = new HashMap<>();

        final ExpectedAttributeValue expectedAV =
                ExpectedAttributeValue.builder().exists(false).build();
        result.put(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME, expectedAV);

        return result;
    }

    private Map<String, ExpectedAttributeValue> getDynamoExistentExpectation(final String keyValue) {
        final Map<String, ExpectedAttributeValue> result = new HashMap<>();

        final ExpectedAttributeValue expectedAV = ExpectedAttributeValue.builder()
                .value(AttributeValue.fromS(keyValue))
                .build();
        result.put(COORDINATOR_STATE_TABLE_HASH_KEY_ATTRIBUTE_NAME, expectedAV);

        return result;
    }

    private Map<String, AttributeValueUpdate> getDynamoCoordinatorStateUpdate(final CoordinatorState state) {
        final HashMap<String, AttributeValueUpdate> updates = new HashMap<>();
        if (state instanceof MigrationState) {
            updates.putAll(((MigrationState) state).getDynamoUpdate());
        }
        state.getAttributes()
                .forEach((attribute, value) -> updates.put(
                        attribute,
                        AttributeValueUpdate.builder()
                                .value(value)
                                .action(AttributeAction.PUT)
                                .build()));
        return updates;
    }

    private TableDescription getTableDescription() {
        try {
            final DescribeTableResponse response = unwrappingFuture(() -> dynamoDbAsyncClient.describeTable(
                    DescribeTableRequest.builder().tableName(config.tableName()).build()));
            return response.table();
        } catch (final ResourceNotFoundException e) {
            return null;
        }
    }
}
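A hypothetical usage sketch (not part of the diff) for the DAO above: create the CoordinatorState table if needed, then write and read back one item. The application name and key are made-up values; a default DynamoDbAsyncClient and valid AWS credentials are assumed, and checked exceptions are simply propagated for brevity.

import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.kinesis.coordinator.CoordinatorConfig;
import software.amazon.kinesis.coordinator.CoordinatorState;
import software.amazon.kinesis.coordinator.CoordinatorStateDAO;

public class CoordinatorStateDaoExample {
    public static void main(String[] args) throws Exception {
        DynamoDbAsyncClient ddb = DynamoDbAsyncClient.create();
        CoordinatorStateDAO dao = new CoordinatorStateDAO(
                ddb, new CoordinatorConfig("my-kcl-app").coordinatorStateTableConfig());
        dao.initialize(); // creates the CoordinatorState table if it does not already exist

        CoordinatorState state = CoordinatorState.builder().key("exampleKey").build();
        boolean created = dao.createCoordinatorStateIfNotExists(state); // conditional put
        System.out.println("created=" + created + ", stored=" + dao.getCoordinatorState("exampleKey"));
    }
}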
@@ -1,37 +0,0 @@
package software.amazon.kinesis.coordinator;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.common.StreamIdentifier;

/**
 * This class stores an in-memory set of streams that no longer exist (deleted) and need to be
 * cleaned up from the KCL's in-memory state.
 */
@Slf4j
public class DeletedStreamListProvider {

    private final Set<StreamIdentifier> deletedStreams;

    public DeletedStreamListProvider() {
        deletedStreams = ConcurrentHashMap.newKeySet();
    }

    public void add(StreamIdentifier streamIdentifier) {
        log.info("Added {}", streamIdentifier);
        deletedStreams.add(streamIdentifier);
    }

    /**
     * Returns and empties the current set of streams.
     * @return set of deleted streams
     */
    public Set<StreamIdentifier> purgeAllDeletedStream() {
        final Set<StreamIdentifier> response = new HashSet<>(deletedStreams);
        deletedStreams.removeAll(response);
        return response;
    }
}
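A small usage sketch (not part of the diff): recording a deleted stream and draining the set during a cleanup pass. The stream name is illustrative.

import java.util.Set;
import software.amazon.kinesis.common.StreamIdentifier;
import software.amazon.kinesis.coordinator.DeletedStreamListProvider;

public class DeletedStreamExample {
    public static void main(String[] args) {
        DeletedStreamListProvider provider = new DeletedStreamListProvider();
        provider.add(StreamIdentifier.singleStreamInstance("orders-stream"));

        Set<StreamIdentifier> toCleanUp = provider.purgeAllDeletedStream();
        System.out.println(toCleanUp);                        // one entry
        System.out.println(provider.purgeAllDeletedStream()); // now empty
    }
}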
@@ -1,35 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import java.util.concurrent.ExecutorService;

import lombok.NoArgsConstructor;
import software.amazon.kinesis.leases.LeaseCoordinator;

/**
 * Creates {@link DiagnosticEvent}s for logging and visibility
 */
@NoArgsConstructor
class DiagnosticEventFactory {
    ExecutorStateEvent executorStateEvent(ExecutorService executorService, LeaseCoordinator leaseCoordinator) {
        return new ExecutorStateEvent(executorService, leaseCoordinator);
    }

    RejectedTaskEvent rejectedTaskEvent(ExecutorStateEvent executorStateEvent, Throwable t) {
        return new RejectedTaskEvent(executorStateEvent, t);
    }
}
@@ -1,33 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

/**
 * An interface to implement behaviors associated with a {@link DiagnosticEvent}. Uses the visitor pattern to visit
 * the DiagnosticEvent when the behavior is desired. A default implementation that performs simple logging is found in
 * {@link DiagnosticEventLogger}.
 */
interface DiagnosticEventHandler {
    /**
     * @param event Log or otherwise react to periodic pulses on the thread pool executor state.
     */
    void visit(ExecutorStateEvent event);

    /**
     * @param event Log or otherwise react to rejected tasks in the RxJavaPlugin layer.
     */
    void visit(RejectedTaskEvent event);
}
@@ -1,55 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

/**
 * Internal implementation of {@link DiagnosticEventHandler} used by {@link Scheduler} to log executor state both
 * 1) in normal conditions periodically, and 2) in reaction to rejected task exceptions.
 */
@NoArgsConstructor
@Slf4j
@KinesisClientInternalApi
class DiagnosticEventLogger implements DiagnosticEventHandler {
    private static final long EXECUTOR_LOG_INTERVAL_MILLIS = 30000L;
    private long nextExecutorLogTime = System.currentTimeMillis() + EXECUTOR_LOG_INTERVAL_MILLIS;

    /**
     * {@inheritDoc}
     *
     * Only log at info level every 30s to avoid over-logging, else log at debug level
     */
    @Override
    public void visit(ExecutorStateEvent event) {
        if (System.currentTimeMillis() >= nextExecutorLogTime) {
            log.info(event.message());
            nextExecutorLogTime = System.currentTimeMillis() + EXECUTOR_LOG_INTERVAL_MILLIS;
        } else {
            log.debug(event.message());
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void visit(RejectedTaskEvent event) {
        log.error(event.message(), event.getThrowable());
    }
}

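Note (not part of this change): a minimal sketch of the diagnostic-event visitor flow above, assuming it lives in the software.amazon.kinesis.coordinator package since the factory, handler, and logger are package-private.

    // Hypothetical same-package sketch of the DiagnosticEvent visitor pattern.
    ExecutorService executor = Executors.newFixedThreadPool(4); // backed by a ThreadPoolExecutor
    DiagnosticEventFactory factory = new DiagnosticEventFactory();
    DiagnosticEventHandler handler = new DiagnosticEventLogger();
    ExecutorStateEvent stateEvent = new ExecutorStateEvent(executor); // public constructor, no LeaseCoordinator needed
    stateEvent.accept(handler); // double-dispatches to handler.visit(ExecutorStateEvent): info every 30s, else debug
    RejectedTaskEvent rejected = factory.rejectedTaskEvent(stateEvent, new RejectedExecutionException("example"));
    handler.visit(rejected); // logs at error level with the throwable attached
    executor.shutdown();
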
@@ -1,409 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator;

import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Supplier;

import lombok.AccessLevel;
import lombok.Builder;
import lombok.Getter;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.annotations.ThreadSafe;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.coordinator.MigrationAdaptiveLeaseAssignmentModeProvider.LeaseAssignmentMode;
import software.amazon.kinesis.coordinator.assignment.LeaseAssignmentManager;
import software.amazon.kinesis.coordinator.migration.ClientVersion;
import software.amazon.kinesis.leader.DynamoDBLockBasedLeaderDecider;
import software.amazon.kinesis.leader.MigrationAdaptiveLeaderDecider;
import software.amazon.kinesis.leases.LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig;
import software.amazon.kinesis.leases.LeaseRefresher;
import software.amazon.kinesis.leases.exceptions.DependencyException;
import software.amazon.kinesis.metrics.MetricsFactory;
import software.amazon.kinesis.worker.metricstats.WorkerMetricStatsDAO;
import software.amazon.kinesis.worker.metricstats.WorkerMetricStatsManager;
import software.amazon.kinesis.worker.metricstats.WorkerMetricStatsReporter;

import static software.amazon.kinesis.coordinator.MigrationAdaptiveLeaseAssignmentModeProvider.LeaseAssignmentMode.DEFAULT_LEASE_COUNT_BASED_ASSIGNMENT;
import static software.amazon.kinesis.coordinator.MigrationAdaptiveLeaseAssignmentModeProvider.LeaseAssignmentMode.WORKER_UTILIZATION_AWARE_ASSIGNMENT;
import static software.amazon.kinesis.coordinator.assignment.LeaseAssignmentManager.DEFAULT_NO_OF_SKIP_STAT_FOR_DEAD_WORKER_THRESHOLD;

/**
 * This class is responsible for initializing the KCL components that support
 * seamless upgrade from v2.x to v3.x.
 * During specific versions, it also dynamically switches the functionality
 * to be either vanilla 3.x or 2.x compatible.
 *
 * It is responsible for creating:
 * 1. LeaderDecider
 * 2. LAM
 * 3. WorkerMetricStatsReporter
 *
 * It manages initializing the following components at initialization time
 * 1. workerMetricsDAO and workerMetricsManager
 * 2. leaderDecider
 * 3. MigrationAdaptiveLeaseAssignmentModeProvider
 *
 * It updates the following components dynamically:
 * 1. starts/stops LAM
 * 2. starts/stops WorkerMetricStatsReporter
 * 3. updates LeaseAssignmentMode to either DEFAULT_LEASE_COUNT_BASED_ASSIGNMENT or WORKER_UTILIZATION_AWARE_ASSIGNMENT
 * 4. creates GSI (deletion is done by KclMigrationTool)
 * 5. creates WorkerMetricStats table (deletion is done by KclMigrationTool)
 * 6. updates LeaderDecider to either DeterministicShuffleShardSyncLeaderDecider or DynamoDBLockBasedLeaderDecider
 */
@Slf4j
@KinesisClientInternalApi
@ThreadSafe
@Accessors(fluent = true)
public final class DynamicMigrationComponentsInitializer {
    private static final long SCHEDULER_SHUTDOWN_TIMEOUT_SECONDS = 60L;

    @Getter
    private final MetricsFactory metricsFactory;

    @Getter
    private final LeaseRefresher leaseRefresher;

    private final CoordinatorStateDAO coordinatorStateDAO;
    private final ScheduledExecutorService workerMetricsThreadPool;

    @Getter
    private final WorkerMetricStatsDAO workerMetricsDAO;

    private final WorkerMetricStatsManager workerMetricsManager;
    private final ScheduledExecutorService lamThreadPool;
    private final BiFunction<ScheduledExecutorService, LeaderDecider, LeaseAssignmentManager> lamCreator;
    private final Supplier<MigrationAdaptiveLeaderDecider> adaptiveLeaderDeciderCreator;
    private final Supplier<DeterministicShuffleShardSyncLeaderDecider> deterministicLeaderDeciderCreator;
    private final Supplier<DynamoDBLockBasedLeaderDecider> ddbLockBasedLeaderDeciderCreator;

    @Getter
    private final String workerIdentifier;

    private final WorkerUtilizationAwareAssignmentConfig workerUtilizationAwareAssignmentConfig;

    @Getter
    private final long workerMetricsExpirySeconds;

    private final MigrationAdaptiveLeaseAssignmentModeProvider leaseModeChangeConsumer;

    @Getter
    private LeaderDecider leaderDecider;

    private LeaseAssignmentManager leaseAssignmentManager;
    private ScheduledFuture<?> workerMetricsReporterFuture;
    private LeaseAssignmentMode currentAssignmentMode;
    private boolean dualMode;
    private boolean initialized;

    @Builder(access = AccessLevel.PACKAGE)
    DynamicMigrationComponentsInitializer(
            final MetricsFactory metricsFactory,
            final LeaseRefresher leaseRefresher,
            final CoordinatorStateDAO coordinatorStateDAO,
            final ScheduledExecutorService workerMetricsThreadPool,
            final WorkerMetricStatsDAO workerMetricsDAO,
            final WorkerMetricStatsManager workerMetricsManager,
            final ScheduledExecutorService lamThreadPool,
            final BiFunction<ScheduledExecutorService, LeaderDecider, LeaseAssignmentManager> lamCreator,
            final Supplier<MigrationAdaptiveLeaderDecider> adaptiveLeaderDeciderCreator,
            final Supplier<DeterministicShuffleShardSyncLeaderDecider> deterministicLeaderDeciderCreator,
            final Supplier<DynamoDBLockBasedLeaderDecider> ddbLockBasedLeaderDeciderCreator,
            final String workerIdentifier,
            final WorkerUtilizationAwareAssignmentConfig workerUtilizationAwareAssignmentConfig,
            final MigrationAdaptiveLeaseAssignmentModeProvider leaseAssignmentModeProvider) {
        this.metricsFactory = metricsFactory;
        this.leaseRefresher = leaseRefresher;
        this.coordinatorStateDAO = coordinatorStateDAO;
        this.workerIdentifier = workerIdentifier;
        this.workerUtilizationAwareAssignmentConfig = workerUtilizationAwareAssignmentConfig;
        this.workerMetricsExpirySeconds = Duration.ofMillis(DEFAULT_NO_OF_SKIP_STAT_FOR_DEAD_WORKER_THRESHOLD
                        * workerUtilizationAwareAssignmentConfig.workerMetricsReporterFreqInMillis())
                .getSeconds();
        this.workerMetricsManager = workerMetricsManager;
        this.workerMetricsDAO = workerMetricsDAO;
        this.workerMetricsThreadPool = workerMetricsThreadPool;
        this.lamThreadPool = lamThreadPool;
        this.lamCreator = lamCreator;
        this.adaptiveLeaderDeciderCreator = adaptiveLeaderDeciderCreator;
        this.deterministicLeaderDeciderCreator = deterministicLeaderDeciderCreator;
        this.ddbLockBasedLeaderDeciderCreator = ddbLockBasedLeaderDeciderCreator;
        this.leaseModeChangeConsumer = leaseAssignmentModeProvider;
    }

    public void initialize(final ClientVersion migrationStateMachineStartingClientVersion) throws DependencyException {
        if (initialized) {
            log.info("Already initialized, nothing to do");
            return;
        }

        // always collect metrics so that when we flip to start reporting we will have accurate historical data.
        log.info("Start collection of WorkerMetricStats");
        workerMetricsManager.startManager();
        if (migrationStateMachineStartingClientVersion == ClientVersion.CLIENT_VERSION_3X) {
            initializeComponentsFor3x();
        } else {
            initializeComponentsForMigration(migrationStateMachineStartingClientVersion);
        }
        log.info("Initialized dual mode {} current assignment mode {}", dualMode, currentAssignmentMode);

        log.info("Creating LAM");
        leaseAssignmentManager = lamCreator.apply(lamThreadPool, leaderDecider);
        log.info("Initializing {}", leaseModeChangeConsumer.getClass().getSimpleName());
        leaseModeChangeConsumer.initialize(dualMode, currentAssignmentMode);
        initialized = true;
    }

    private void initializeComponentsFor3x() {
        log.info("Initializing for 3x functionality");
        dualMode = false;
        currentAssignmentMode = WORKER_UTILIZATION_AWARE_ASSIGNMENT;
        log.info("Initializing dualMode {} assignmentMode {}", dualMode, currentAssignmentMode);
        leaderDecider = ddbLockBasedLeaderDeciderCreator.get();
        log.info("Initializing {}", leaderDecider.getClass().getSimpleName());
        leaderDecider.initialize();
    }

    private void initializeComponentsForMigration(final ClientVersion migrationStateMachineStartingClientVersion) {
        log.info("Initializing for migration to 3x");
        dualMode = true;
        final LeaderDecider initialLeaderDecider;
        if (migrationStateMachineStartingClientVersion == ClientVersion.CLIENT_VERSION_3X_WITH_ROLLBACK) {
            currentAssignmentMode = WORKER_UTILIZATION_AWARE_ASSIGNMENT;
            initialLeaderDecider = ddbLockBasedLeaderDeciderCreator.get();
        } else {
            currentAssignmentMode = DEFAULT_LEASE_COUNT_BASED_ASSIGNMENT;
            initialLeaderDecider = deterministicLeaderDeciderCreator.get();
        }
        log.info("Initializing dualMode {} assignmentMode {}", dualMode, currentAssignmentMode);

        final MigrationAdaptiveLeaderDecider adaptiveLeaderDecider = adaptiveLeaderDeciderCreator.get();
        log.info(
                "Initializing MigrationAdaptiveLeaderDecider with {}",
                initialLeaderDecider.getClass().getSimpleName());
        adaptiveLeaderDecider.updateLeaderDecider(initialLeaderDecider);
        this.leaderDecider = adaptiveLeaderDecider;
    }

    void shutdown() {
        log.info("Shutting down components");
        if (initialized) {
            log.info("Stopping LAM, LeaderDecider, workerMetrics reporting and collection");
            leaseAssignmentManager.stop();
            // leader decider is shut down later when scheduler is doing a final shutdown
            // since scheduler still accesses the leader decider while shutting down
            stopWorkerMetricsReporter();
            workerMetricsManager.stopManager();
        }

        // lam does not manage lifecycle of its threadpool to easily stop/start dynamically.
        // once migration code is obsolete (i.e. all 3x functionality is the baseline and no
        // migration is needed), it can be moved inside lam
        log.info("Shutting down lamThreadPool and workerMetrics reporter thread pool");
        lamThreadPool.shutdown();
        workerMetricsThreadPool.shutdown();
        try {
            if (!lamThreadPool.awaitTermination(SCHEDULER_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
                log.info(
                        "LamThreadPool did not shutdown in {}s, forcefully shutting down",
                        SCHEDULER_SHUTDOWN_TIMEOUT_SECONDS);
                lamThreadPool.shutdownNow();
            }
        } catch (final InterruptedException e) {
            log.warn("Interrupted while waiting for shutdown of LeaseAssignmentManager ThreadPool", e);
            lamThreadPool.shutdownNow();
        }

        try {
            if (!workerMetricsThreadPool.awaitTermination(SCHEDULER_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
                log.info(
                        "WorkerMetricsThreadPool did not shutdown in {}s, forcefully shutting down",
                        SCHEDULER_SHUTDOWN_TIMEOUT_SECONDS);
                workerMetricsThreadPool.shutdownNow();
            }
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            log.warn("Interrupted while waiting for shutdown of WorkerMetricStatsManager ThreadPool", e);
            workerMetricsThreadPool.shutdownNow();
        }
    }

    private void startWorkerMetricsReporting() throws DependencyException {
        if (workerMetricsReporterFuture != null) {
            log.info("Worker metrics reporting is already running...");
            return;
        }
        log.info("Initializing WorkerMetricStats");
        this.workerMetricsDAO.initialize();
        log.info("Starting worker metrics reporter");
        // Start with a delay for workerStatsManager to capture some values and start reporting.
        workerMetricsReporterFuture = workerMetricsThreadPool.scheduleAtFixedRate(
                new WorkerMetricStatsReporter(metricsFactory, workerIdentifier, workerMetricsManager, workerMetricsDAO),
                workerUtilizationAwareAssignmentConfig.inMemoryWorkerMetricsCaptureFrequencyMillis() * 2L,
                workerUtilizationAwareAssignmentConfig.workerMetricsReporterFreqInMillis(),
                TimeUnit.MILLISECONDS);
    }

    private void stopWorkerMetricsReporter() {
        log.info("Stopping worker metrics reporter");
        if (workerMetricsReporterFuture != null) {
            workerMetricsReporterFuture.cancel(false);
            workerMetricsReporterFuture = null;
        }
    }

    /**
     * Create LeaseOwnerToLeaseKey GSI for the lease table
     * @param blockingWait whether to wait for the GSI creation or not; if false, the GSI creation will be initiated
     *                     but this call will not block for its creation
     * @throws DependencyException If DDB fails unexpectedly when creating the GSI
     */
    private void createGsi(final boolean blockingWait) throws DependencyException {
        log.info("Creating Lease table GSI if it does not exist");
        // KCLv3.0 always starts with GSI available
        leaseRefresher.createLeaseOwnerToLeaseKeyIndexIfNotExists();

        if (blockingWait) {
            log.info("Waiting for Lease table GSI creation");
            final long secondsBetweenPolls = 10L;
            final long timeoutSeconds = 600L;
            final boolean isIndexActive =
                    leaseRefresher.waitUntilLeaseOwnerToLeaseKeyIndexExists(secondsBetweenPolls, timeoutSeconds);

            if (!isIndexActive) {
                throw new DependencyException(
                        new IllegalStateException("Creating LeaseOwnerToLeaseKeyIndex on Lease table timed out"));
            }
        }
    }

    /**
     * Initialize KCL with components and configuration to support upgrade from 2x. This can happen
     * at KCL Worker startup when MigrationStateMachine starts in ClientVersion.CLIENT_VERSION_UPGRADE_FROM_2X,
     * or dynamically during roll-forward from ClientVersion.CLIENT_VERSION_2X.
     */
    public synchronized void initializeClientVersionForUpgradeFrom2x(final ClientVersion fromClientVersion)
            throws DependencyException {
        log.info("Initializing KCL components for upgrade from 2x from {}", fromClientVersion);

        createGsi(false);
        startWorkerMetricsReporting();
        // LAM is not started until the dynamic flip to 3xWithRollback
    }

    /**
     * Initialize KCL with components and configuration to run vanilla 3x functionality. This can happen
     * at KCL Worker startup when MigrationStateMachine starts in ClientVersion.CLIENT_VERSION_3X, or dynamically
     * during a new deployment when existing workers are in ClientVersion.CLIENT_VERSION_3X_WITH_ROLLBACK
     */
    public synchronized void initializeClientVersionFor3x(final ClientVersion fromClientVersion)
            throws DependencyException {
        log.info("Initializing KCL components for 3x from {}", fromClientVersion);

        log.info("Initializing LeaseAssignmentManager, DDB-lock-based leader decider, WorkerMetricStats manager"
                + " and creating the Lease table GSI if it does not exist");
        if (fromClientVersion == ClientVersion.CLIENT_VERSION_INIT) {
            // gsi may already exist and be active for migrated application.
            createGsi(true);
            startWorkerMetricsReporting();
            log.info("Starting LAM");
            leaseAssignmentManager.start();
        }
        // nothing to do when transitioning from CLIENT_VERSION_3X_WITH_ROLLBACK.
    }

    /**
     * Initialize KCL with components and configuration to run 2x compatible functionality
     * while allowing roll-forward. This can happen at KCL Worker startup when MigrationStateMachine
     * starts in ClientVersion.CLIENT_VERSION_2X (after a rollback),
     * or dynamically during rollback from CLIENT_VERSION_UPGRADE_FROM_2X or CLIENT_VERSION_3X_WITH_ROLLBACK.
     */
    public synchronized void initializeClientVersionFor2x(final ClientVersion fromClientVersion) {
        log.info("Initializing KCL components for rollback to 2x from {}", fromClientVersion);

        if (fromClientVersion != ClientVersion.CLIENT_VERSION_INIT) {
            // dynamic rollback
            stopWorkerMetricsReporter();
            // Migration Tool will delete the lease table LeaseOwner GSI
            // and WorkerMetricStats table
        }

        if (fromClientVersion == ClientVersion.CLIENT_VERSION_3X_WITH_ROLLBACK) {
            // we are rolling back after flip
            currentAssignmentMode = DEFAULT_LEASE_COUNT_BASED_ASSIGNMENT;
            notifyLeaseAssignmentModeChange();
            log.info("Stopping LAM");
            leaseAssignmentManager.stop();
            final LeaderDecider leaderDecider = deterministicLeaderDeciderCreator.get();
            if (this.leaderDecider instanceof MigrationAdaptiveLeaderDecider) {
                log.info(
                        "Updating LeaderDecider to {}", leaderDecider.getClass().getSimpleName());
                ((MigrationAdaptiveLeaderDecider) this.leaderDecider).updateLeaderDecider(leaderDecider);
            } else {
                throw new IllegalStateException(String.format("Unexpected leader decider %s", this.leaderDecider));
            }
        }
    }

    /**
     * Initialize KCL with components and configuration to run vanilla 3x functionality
     * while allowing roll-back to 2x functionality. This can happen at KCL Worker startup
     * when MigrationStateMachine starts in ClientVersion.CLIENT_VERSION_3X_WITH_ROLLBACK (after the flip),
     * or dynamically during the flip from CLIENT_VERSION_UPGRADE_FROM_2X.
     */
    public synchronized void initializeClientVersionFor3xWithRollback(final ClientVersion fromClientVersion)
            throws DependencyException {
        log.info("Initializing KCL components for 3x with rollback from {}", fromClientVersion);

        if (fromClientVersion == ClientVersion.CLIENT_VERSION_UPGRADE_FROM_2X) {
            // dynamic flip
            currentAssignmentMode = WORKER_UTILIZATION_AWARE_ASSIGNMENT;
            notifyLeaseAssignmentModeChange();
            final LeaderDecider leaderDecider = ddbLockBasedLeaderDeciderCreator.get();
            log.info("Updating LeaderDecider to {}", leaderDecider.getClass().getSimpleName());
            ((MigrationAdaptiveLeaderDecider) this.leaderDecider).updateLeaderDecider(leaderDecider);
        } else {
            startWorkerMetricsReporting();
        }

        log.info("Starting LAM");
        leaseAssignmentManager.start();
    }

    /**
     * Synchronously invoke the consumer to change the lease assignment mode.
     */
    private void notifyLeaseAssignmentModeChange() {
        if (dualMode) {
            log.info("Notifying {} of {}", leaseModeChangeConsumer, currentAssignmentMode);
            if (Objects.nonNull(leaseModeChangeConsumer)) {
                try {
                    leaseModeChangeConsumer.updateLeaseAssignmentMode(currentAssignmentMode);
                } catch (final Exception e) {
                    log.warn("LeaseAssignmentMode change consumer threw exception", e);
                }
            }
        } else {
            throw new IllegalStateException("Unexpected assignment mode change");
        }
    }
}

@@ -1,161 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Supplier;

import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.BatchGetItemRequest;
import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse;
import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemRequest;
import software.amazon.awssdk.services.dynamodb.model.BatchWriteItemResponse;
import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest;
import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse;
import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest;
import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse;
import software.amazon.awssdk.services.dynamodb.model.DeleteTableRequest;
import software.amazon.awssdk.services.dynamodb.model.DeleteTableResponse;
import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest;
import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse;
import software.amazon.awssdk.services.dynamodb.model.GetItemRequest;
import software.amazon.awssdk.services.dynamodb.model.GetItemResponse;
import software.amazon.awssdk.services.dynamodb.model.PutItemRequest;
import software.amazon.awssdk.services.dynamodb.model.PutItemResponse;
import software.amazon.awssdk.services.dynamodb.model.QueryRequest;
import software.amazon.awssdk.services.dynamodb.model.QueryResponse;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse;
import software.amazon.awssdk.services.dynamodb.paginators.BatchGetItemIterable;
import software.amazon.awssdk.services.dynamodb.paginators.QueryIterable;
import software.amazon.awssdk.services.dynamodb.paginators.ScanIterable;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

/**
 * DDB Lock client depends on DynamoDbClient and KCL only has DynamoDbAsyncClient configured.
 * This wrapper delegates APIs from sync client to async client internally so that it can
 * be used with the DDB Lock client.
 */
@KinesisClientInternalApi
public class DynamoDbAsyncToSyncClientAdapter implements DynamoDbClient {
    private final DynamoDbAsyncClient asyncClient;

    public DynamoDbAsyncToSyncClientAdapter(final DynamoDbAsyncClient asyncClient) {
        this.asyncClient = asyncClient;
    }

    @Override
    public String serviceName() {
        return asyncClient.serviceName();
    }

    @Override
    public void close() {
        asyncClient.close();
    }

    private <T> T handleException(final Supplier<CompletableFuture<T>> task) {
        try {
            return task.get().join();
        } catch (final CompletionException e) {
            rethrow(e.getCause());
            return null;
        }
    }

    @Override
    public CreateTableResponse createTable(final CreateTableRequest request) {
        return handleException(() -> asyncClient.createTable(request));
    }

    @Override
    public DescribeTableResponse describeTable(final DescribeTableRequest request) {
        return handleException(() -> asyncClient.describeTable(request));
    }

    @Override
    public DeleteTableResponse deleteTable(final DeleteTableRequest request) {
        return handleException(() -> asyncClient.deleteTable(request));
    }

    @Override
    public DeleteItemResponse deleteItem(final DeleteItemRequest request) {
        return handleException(() -> asyncClient.deleteItem(request));
    }

    @Override
    public GetItemResponse getItem(final GetItemRequest request) {
        return handleException(() -> asyncClient.getItem(request));
    }

    @Override
    public PutItemResponse putItem(final PutItemRequest request) {
        return handleException(() -> asyncClient.putItem(request));
    }

    @Override
    public UpdateItemResponse updateItem(final UpdateItemRequest request) {
        return handleException(() -> asyncClient.updateItem(request));
    }

    @Override
    public QueryResponse query(final QueryRequest request) {
        return handleException(() -> asyncClient.query(request));
    }

    @Override
    public ScanResponse scan(final ScanRequest request) {
        return handleException(() -> asyncClient.scan(request));
    }

    @Override
    public QueryIterable queryPaginator(final QueryRequest request) {
        return new QueryIterable(this, request);
    }

    @Override
    public ScanIterable scanPaginator(final ScanRequest request) {
        return new ScanIterable(this, request);
    }

    @Override
    public BatchGetItemResponse batchGetItem(final BatchGetItemRequest request) {
        return handleException(() -> asyncClient.batchGetItem(request));
    }

    @Override
    public BatchWriteItemResponse batchWriteItem(final BatchWriteItemRequest request) {
        return handleException(() -> asyncClient.batchWriteItem(request));
    }

    @Override
    public BatchGetItemIterable batchGetItemPaginator(final BatchGetItemRequest request) {
        return new BatchGetItemIterable(this, request);
    }

    private static void rethrow(final Throwable e) {
        castAndThrow(e);
    }

    @SuppressWarnings("unchecked")
    private static <T extends Throwable> void castAndThrow(final Throwable e) throws T {
        throw (T) e;
    }
}

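Note (not part of this change): a minimal sketch of how the adapter above can stand in wherever a synchronous DynamoDbClient is required; the region and table name are placeholders, and valid AWS credentials are assumed.

    // Hypothetical usage sketch: wrap the async client and issue a synchronous-looking call.
    // Requires software.amazon.awssdk.regions.Region in addition to the imports above.
    DynamoDbAsyncClient asyncClient = DynamoDbAsyncClient.builder()
            .region(Region.US_WEST_2) // placeholder region
            .build();
    DynamoDbClient syncView = new DynamoDbAsyncToSyncClientAdapter(asyncClient);
    DescribeTableResponse response = syncView.describeTable(
            DescribeTableRequest.builder().tableName("MyLeaseTable").build()); // placeholder table name
    System.out.println(response.table().tableStatus());
    syncView.close(); // closes the underlying async client
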
@@ -1,74 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;

import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.leases.LeaseCoordinator;

@Getter
@ToString(exclude = "isThreadPoolExecutor")
@Slf4j
@KinesisClientInternalApi
public class ExecutorStateEvent implements DiagnosticEvent {
    private static final String MESSAGE = "Current thread pool executor state: ";

    private boolean isThreadPoolExecutor;
    private String executorName;
    private int currentQueueSize;
    private int activeThreads;
    private int coreThreads;
    private int leasesOwned;
    private int largestPoolSize;
    private int maximumPoolSize;

    ExecutorStateEvent(ExecutorService executor, LeaseCoordinator leaseCoordinator) {
        this(executor);
        this.leasesOwned = leaseCoordinator.getAssignments().size();
    }

    public ExecutorStateEvent(ExecutorService executor) {
        if (executor instanceof ThreadPoolExecutor) {
            this.isThreadPoolExecutor = true;

            ThreadPoolExecutor ex = (ThreadPoolExecutor) executor;
            this.executorName = ex.getClass().getSimpleName();
            this.currentQueueSize = ex.getQueue().size();
            this.activeThreads = ex.getActiveCount();
            this.coreThreads = ex.getCorePoolSize();
            this.largestPoolSize = ex.getLargestPoolSize();
            this.maximumPoolSize = ex.getMaximumPoolSize();
        }
    }

    @Override
    public void accept(DiagnosticEventHandler visitor) {
        // logging is only meaningful for a ThreadPoolExecutor executor service (default config)
        if (isThreadPoolExecutor) {
            visitor.visit(this);
        }
    }

    @Override
    public String message() {
        return MESSAGE + this.toString();
    }
}

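Note (not part of this change): a small illustration of how this event surfaces executor health. Executors.newFixedThreadPool returns a ThreadPoolExecutor under the hood, so the instanceof branch above is taken.

    // Hypothetical sketch: snapshot a thread pool's state and render the diagnostic message.
    ExecutorService executor = Executors.newFixedThreadPool(8);
    executor.submit(() -> System.out.println("doing some work"));
    ExecutorStateEvent snapshot = new ExecutorStateEvent(executor);
    System.out.println(snapshot.message()); // "Current thread pool executor state: ExecutorStateEvent(...)"
    executor.shutdown();
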
@@ -1,126 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator;

import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.annotations.ThreadSafe;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

/**
 * Provides the lease assignment mode KCL must operate in during migration
 * from 2.x to 3.x.
 * KCL v2.x lease assignment is based on a distributed worker-stealing algorithm
 * which balances lease count across workers.
 * KCL v3.x lease assignment is based on a centralized-lease-assignment algorithm
 * which balances resource utilization metrics (e.g. CPU utilization) across workers.
 *
 * For a new application starting in KCL v3.x, there is no migration needed,
 * so KCL will initialize with the lease assignment mode accordingly, and it will
 * not change dynamically.
 *
 * During upgrade from 2.x to 3.x, the KCL library needs the ability to
 * start in v2.x assignment mode but dynamically change to v3.x assignment.
 * In this case, both 2.x and 3.x lease assignment will be running but one
 * of them will be a no-op based on the mode.
 *
 * The methods and internal state are guarded for concurrent access to allow
 * both lease assignment algorithms to access the state concurrently while
 * it can be dynamically updated.
 */
@KinesisClientInternalApi
@Slf4j
@ThreadSafe
@NoArgsConstructor
public final class MigrationAdaptiveLeaseAssignmentModeProvider {

    public enum LeaseAssignmentMode {
        /**
         * This is the 2.x assignment mode.
         * This mode assigns leases based on the number of leases.
         * This mode involves each worker independently determining how many leases to pick or how many leases to steal
         * from other workers.
         */
        DEFAULT_LEASE_COUNT_BASED_ASSIGNMENT,

        /**
         * This is the 3.x assignment mode.
         * This mode uses each worker's resource utilization to perform lease assignment.
         * Assignment is done by a single worker (elected leader), which looks at WorkerMetricStats for each worker to
         * determine lease assignment.
         *
         * This mode primarily does the following:
         * 1. Starts WorkerMetricStatsManager on the worker which starts publishing WorkerMetricStats
         * 2. Starts the LeaseDiscoverer
         * 3. Creates the LeaseOwnerToLeaseKey GSI on the lease table (if not already available) and validates that
         *    it is ACTIVE.
         */
        WORKER_UTILIZATION_AWARE_ASSIGNMENT;
    }

    private LeaseAssignmentMode currentMode;
    private boolean initialized = false;
    private boolean dynamicModeChangeSupportNeeded;

    /**
     * Specify whether both lease assignment algorithms should be initialized to
     * support dynamically changing lease mode.
     * @return true if lease assignment mode can change dynamically,
     *         false otherwise.
     */
    public synchronized boolean dynamicModeChangeSupportNeeded() {
        return dynamicModeChangeSupportNeeded;
    }

    /**
     * Provides the current lease assignment mode in which KCL should perform lease assignment.
     * @return the current lease assignment mode
     */
    public synchronized LeaseAssignmentMode getLeaseAssignmentMode() {
        if (!initialized) {
            throw new IllegalStateException("AssignmentMode is not initialized");
        }
        return currentMode;
    }

    synchronized void initialize(final boolean dynamicModeChangeSupportNeeded, final LeaseAssignmentMode mode) {
        if (!initialized) {
            log.info("Initializing dynamicModeChangeSupportNeeded {} mode {}", dynamicModeChangeSupportNeeded, mode);
            this.dynamicModeChangeSupportNeeded = dynamicModeChangeSupportNeeded;
            this.currentMode = mode;
            this.initialized = true;
            return;
        }
        log.info(
                "Already initialized dynamicModeChangeSupportNeeded {} mode {}. Ignoring new values {}, {}",
                this.dynamicModeChangeSupportNeeded,
                this.currentMode,
                dynamicModeChangeSupportNeeded,
                mode);
    }

    synchronized void updateLeaseAssignmentMode(final LeaseAssignmentMode mode) {
        if (!initialized) {
            throw new IllegalStateException("Cannot change mode before initializing");
        }
        if (dynamicModeChangeSupportNeeded) {
            log.info("Changing Lease assignment mode from {} to {}", currentMode, mode);
            this.currentMode = mode;
            return;
        }
        throw new IllegalStateException(String.format(
                "Lease assignment mode already initialized to %s cannot" + " change to %s", this.currentMode, mode));
    }
}

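Note (not part of this change): a sketch of the guarded lifecycle above. It assumes same-package code (e.g. a test in software.amazon.kinesis.coordinator), because initialize() and updateLeaseAssignmentMode() are package-private.

    // Hypothetical same-package sketch of the provider's lifecycle during an upgrade.
    MigrationAdaptiveLeaseAssignmentModeProvider provider = new MigrationAdaptiveLeaseAssignmentModeProvider();
    // Dual mode requested: both assignment algorithms are wired, starting in the 2.x-compatible mode.
    provider.initialize(true, MigrationAdaptiveLeaseAssignmentModeProvider.LeaseAssignmentMode.DEFAULT_LEASE_COUNT_BASED_ASSIGNMENT);
    // After the migration "flip", the mode can change dynamically because dual mode was requested above.
    provider.updateLeaseAssignmentMode(MigrationAdaptiveLeaseAssignmentModeProvider.LeaseAssignmentMode.WORKER_UTILIZATION_AWARE_ASSIGNMENT);
    System.out.println(provider.getLeaseAssignmentMode()); // WORKER_UTILIZATION_AWARE_ASSIGNMENT
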
@@ -1,26 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator;

public class NoOpWorkerStateChangeListener implements WorkerStateChangeListener {

    /**
     * Empty constructor for NoOp Worker State Change Listener
     */
    public NoOpWorkerStateChangeListener() {}

    @Override
    public void onWorkerStateChange(WorkerState newState) {}
}

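Note (not part of this change): the no-op above is the default behavior; as an aside, an application can supply its own listener to observe worker state transitions. A minimal illustrative implementation:

    // Hypothetical custom listener: logs every worker state transition instead of ignoring it.
    public class LoggingWorkerStateChangeListener implements WorkerStateChangeListener {
        @Override
        public void onWorkerStateChange(WorkerState newState) {
            System.out.println("KCL worker transitioned to state: " + newState);
        }
    }
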
@ -1,584 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2019 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package software.amazon.kinesis.coordinator;
|
|
||||||
|
|
||||||
import java.io.Serializable;
|
|
||||||
import java.math.BigInteger;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.Comparator;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.HashSet;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Optional;
|
|
||||||
import java.util.Set;
|
|
||||||
import java.util.concurrent.Executors;
|
|
||||||
import java.util.concurrent.ScheduledExecutorService;
|
|
||||||
import java.util.concurrent.TimeUnit;
|
|
||||||
import java.util.concurrent.atomic.AtomicBoolean;
|
|
||||||
import java.util.function.Function;
|
|
||||||
import java.util.stream.Collectors;
|
|
||||||
|
|
||||||
import com.google.common.annotations.VisibleForTesting;
|
|
||||||
import com.google.common.collect.ComparisonChain;
|
|
||||||
import lombok.AccessLevel;
|
|
||||||
import lombok.EqualsAndHashCode;
|
|
||||||
import lombok.Getter;
|
|
||||||
import lombok.NonNull;
|
|
||||||
import lombok.Value;
|
|
||||||
import lombok.experimental.Accessors;
|
|
||||||
import lombok.extern.slf4j.Slf4j;
|
|
||||||
import org.apache.commons.lang3.Validate;
|
|
||||||
import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
|
|
||||||
import software.amazon.awssdk.services.kinesis.model.Shard;
|
|
||||||
import software.amazon.awssdk.utils.CollectionUtils;
|
|
||||||
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
|
|
||||||
import software.amazon.kinesis.common.HashKeyRangeForLease;
|
|
||||||
import software.amazon.kinesis.common.StreamConfig;
|
|
||||||
import software.amazon.kinesis.common.StreamIdentifier;
|
|
||||||
import software.amazon.kinesis.leases.Lease;
|
|
||||||
import software.amazon.kinesis.leases.LeaseRefresher;
|
|
||||||
import software.amazon.kinesis.leases.MultiStreamLease;
|
|
||||||
import software.amazon.kinesis.leases.ShardDetector;
|
|
||||||
import software.amazon.kinesis.leases.ShardSyncTaskManager;
|
|
||||||
import software.amazon.kinesis.leases.UpdateField;
|
|
||||||
import software.amazon.kinesis.leases.exceptions.DependencyException;
|
|
||||||
import software.amazon.kinesis.leases.exceptions.InvalidStateException;
|
|
||||||
import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
|
|
||||||
import software.amazon.kinesis.lifecycle.TaskResult;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsFactory;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsLevel;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsScope;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsUtil;
|
|
||||||
|
|
||||||
import static software.amazon.kinesis.common.HashKeyRangeForLease.fromHashKeyRange;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The top level orchestrator for coordinating the periodic shard sync related
|
|
||||||
* activities.
|
|
||||||
*/
|
|
||||||
@Getter
|
|
||||||
@EqualsAndHashCode
|
|
||||||
@Slf4j
|
|
||||||
@KinesisClientInternalApi
|
|
||||||
class PeriodicShardSyncManager {
|
|
||||||
private static final long INITIAL_DELAY = 60 * 1000L;
|
|
||||||
|
|
||||||
@VisibleForTesting
|
|
||||||
static final BigInteger MIN_HASH_KEY = BigInteger.ZERO;
|
|
||||||
|
|
||||||
@VisibleForTesting
|
|
||||||
static final BigInteger MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE);
|
|
||||||
|
|
||||||
static final String PERIODIC_SHARD_SYNC_MANAGER = "PeriodicShardSyncManager";
|
|
||||||
private final Map<StreamIdentifier, HashRangeHoleTracker> hashRangeHoleTrackerMap = new HashMap<>();
|
|
||||||
|
|
||||||
private final String workerId;
|
|
||||||
private LeaderDecider leaderDecider;
|
|
||||||
private final LeaseRefresher leaseRefresher;
|
|
||||||
private final Map<StreamIdentifier, StreamConfig> currentStreamConfigMap;
|
|
||||||
private final Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider;
|
|
||||||
private final Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap;
|
|
||||||
private final ScheduledExecutorService shardSyncThreadPool;
|
|
||||||
private final boolean isMultiStreamingMode;
|
|
||||||
private final MetricsFactory metricsFactory;
|
|
||||||
private final long leasesRecoveryAuditorExecutionFrequencyMillis;
|
|
||||||
private final int leasesRecoveryAuditorInconsistencyConfidenceThreshold;
|
|
||||||
|
|
||||||
@Getter(AccessLevel.NONE)
|
|
||||||
private final AtomicBoolean leaderSynced;
|
|
||||||
|
|
||||||
private boolean isRunning;
|
|
||||||
|
|
||||||
PeriodicShardSyncManager(
|
|
||||||
String workerId,
|
|
||||||
LeaseRefresher leaseRefresher,
|
|
||||||
Map<StreamIdentifier, StreamConfig> currentStreamConfigMap,
|
|
||||||
Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider,
|
|
||||||
Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap,
|
|
||||||
boolean isMultiStreamingMode,
|
|
||||||
MetricsFactory metricsFactory,
|
|
||||||
long leasesRecoveryAuditorExecutionFrequencyMillis,
|
|
||||||
int leasesRecoveryAuditorInconsistencyConfidenceThreshold,
|
|
||||||
AtomicBoolean leaderSynced) {
|
|
||||||
this(
|
|
||||||
workerId,
|
|
||||||
leaseRefresher,
|
|
||||||
currentStreamConfigMap,
|
|
||||||
shardSyncTaskManagerProvider,
|
|
||||||
streamToShardSyncTaskManagerMap,
|
|
||||||
Executors.newSingleThreadScheduledExecutor(),
|
|
||||||
isMultiStreamingMode,
|
|
||||||
metricsFactory,
|
|
||||||
leasesRecoveryAuditorExecutionFrequencyMillis,
|
|
||||||
leasesRecoveryAuditorInconsistencyConfidenceThreshold,
|
|
||||||
leaderSynced);
|
|
||||||
}
|
|
||||||
|
|
||||||
PeriodicShardSyncManager(
|
|
||||||
String workerId,
|
|
||||||
LeaseRefresher leaseRefresher,
|
|
||||||
Map<StreamIdentifier, StreamConfig> currentStreamConfigMap,
|
|
||||||
Function<StreamConfig, ShardSyncTaskManager> shardSyncTaskManagerProvider,
|
|
||||||
Map<StreamConfig, ShardSyncTaskManager> streamToShardSyncTaskManagerMap,
|
|
||||||
ScheduledExecutorService shardSyncThreadPool,
|
|
||||||
boolean isMultiStreamingMode,
|
|
||||||
MetricsFactory metricsFactory,
|
|
||||||
long leasesRecoveryAuditorExecutionFrequencyMillis,
|
|
||||||
int leasesRecoveryAuditorInconsistencyConfidenceThreshold,
|
|
||||||
AtomicBoolean leaderSynced) {
|
|
||||||
Validate.notBlank(workerId, "WorkerID is required to initialize PeriodicShardSyncManager.");
|
|
||||||
this.workerId = workerId;
|
|
||||||
this.leaseRefresher = leaseRefresher;
|
|
||||||
this.currentStreamConfigMap = currentStreamConfigMap;
|
|
||||||
this.shardSyncTaskManagerProvider = shardSyncTaskManagerProvider;
|
|
||||||
this.streamToShardSyncTaskManagerMap = streamToShardSyncTaskManagerMap;
|
|
||||||
this.shardSyncThreadPool = shardSyncThreadPool;
|
|
||||||
this.isMultiStreamingMode = isMultiStreamingMode;
|
|
||||||
this.metricsFactory = metricsFactory;
|
|
||||||
this.leasesRecoveryAuditorExecutionFrequencyMillis = leasesRecoveryAuditorExecutionFrequencyMillis;
|
|
||||||
this.leasesRecoveryAuditorInconsistencyConfidenceThreshold =
|
|
||||||
leasesRecoveryAuditorInconsistencyConfidenceThreshold;
|
|
||||||
this.leaderSynced = leaderSynced;
|
|
||||||
}
|
|
||||||
|
|
||||||
public synchronized TaskResult start(final LeaderDecider leaderDecider) {
|
|
||||||
Validate.notNull(leaderDecider, "LeaderDecider is required to start PeriodicShardSyncManager.");
|
|
||||||
this.leaderDecider = leaderDecider;
|
|
||||||
if (!isRunning) {
|
|
||||||
final Runnable periodicShardSyncer = () -> {
|
|
||||||
try {
|
|
||||||
runShardSync();
|
|
||||||
} catch (Throwable t) {
|
|
||||||
log.error("Error during runShardSync.", t);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
shardSyncThreadPool.scheduleWithFixedDelay(
|
|
||||||
periodicShardSyncer,
|
|
||||||
INITIAL_DELAY,
|
|
||||||
leasesRecoveryAuditorExecutionFrequencyMillis,
|
|
||||||
TimeUnit.MILLISECONDS);
|
|
||||||
isRunning = true;
|
|
||||||
}
|
|
||||||
return new TaskResult(null);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Runs shardSync once
|
|
||||||
* Does not schedule periodic shardSync
|
|
||||||
*/
|
|
||||||
public synchronized void syncShardsOnce() throws Exception {
|
|
||||||
// TODO: Resume the shard sync from failed stream in the next attempt, to avoid syncing
|
|
||||||
// TODO: for already synced streams
|
|
||||||
for (StreamConfig streamConfig : currentStreamConfigMap.values()) {
|
|
||||||
log.info("Syncing Kinesis shard info for {}", streamConfig);
|
|
||||||
final ShardSyncTaskManager shardSyncTaskManager = shardSyncTaskManagerProvider.apply(streamConfig);
|
|
||||||
final TaskResult taskResult = shardSyncTaskManager.callShardSyncTask();
|
|
||||||
if (taskResult.getException() != null) {
|
|
||||||
throw taskResult.getException();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public void stop() {
|
|
||||||
if (isRunning) {
|
|
||||||
log.info(String.format("Shutting down leader decider on worker %s", workerId));
|
|
||||||
leaderDecider.shutdown();
|
|
||||||
log.info(String.format("Shutting down periodic shard sync task scheduler on worker %s", workerId));
|
|
||||||
shardSyncThreadPool.shutdown();
|
|
||||||
isRunning = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
    private void runShardSync() {
        if (leaderDecider.isLeader(workerId) && leaderSynced.get()) {
            log.info(String.format("WorkerId %s is leader, running the periodic shard sync task", workerId));

            final MetricsScope scope =
                    MetricsUtil.createMetricsWithOperation(metricsFactory, PERIODIC_SHARD_SYNC_MANAGER);
            int numStreamsWithPartialLeases = 0;
            int numStreamsToSync = 0;
            int numSkippedShardSyncTask = 0;
            boolean isRunSuccess = false;
            final long runStartMillis = System.currentTimeMillis();

            try {
                // Create a copy of the streams to be considered for this run to avoid a data race with the Scheduler.
                final Set<StreamIdentifier> streamConfigMap = new HashSet<>(currentStreamConfigMap.keySet());

                // Construct the stream-to-leases map to be used in the lease sync.
                final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = getStreamToLeasesMap(streamConfigMap);

                // For each stream, check if a shard sync needs to be done based on the lease state.
                for (StreamIdentifier streamIdentifier : streamConfigMap) {
                    if (!currentStreamConfigMap.containsKey(streamIdentifier)) {
                        log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier);
                        continue;
                    }
                    final ShardSyncResponse shardSyncResponse =
                            checkForShardSync(streamIdentifier, streamToLeasesMap.get(streamIdentifier));

                    numStreamsWithPartialLeases += shardSyncResponse.isHoleDetected() ? 1 : 0;
                    numStreamsToSync += shardSyncResponse.shouldDoShardSync() ? 1 : 0;

                    if (shardSyncResponse.shouldDoShardSync()) {
                        log.info(
                                "Periodic shard syncer initiating shard sync for {} due to the reason - {}",
                                streamIdentifier,
                                shardSyncResponse.reasonForDecision());
                        final StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier);
                        if (streamConfig == null) {
                            log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier);
                            continue;
                        }
                        final ShardSyncTaskManager shardSyncTaskManager;
                        if (streamToShardSyncTaskManagerMap.containsKey(streamConfig)) {
                            log.info(
                                    "shardSyncTaskManager for stream {} already exists", streamIdentifier.streamName());
                            shardSyncTaskManager = streamToShardSyncTaskManagerMap.get(streamConfig);
                        } else {
                            // If the streamConfig of a stream has already been added to currentStreamConfigMap but
                            // the Scheduler failed to create a shardSyncTaskManager for it, the Scheduler will not
                            // try to create one later. So enable PeriodicShardSyncManager to do it for such cases.
                            log.info(
                                    "Failed to get shardSyncTaskManager so creating one for stream {}.",
                                    streamIdentifier.streamName());
                            shardSyncTaskManager = streamToShardSyncTaskManagerMap.computeIfAbsent(
                                    streamConfig, s -> shardSyncTaskManagerProvider.apply(s));
                        }
                        if (!shardSyncTaskManager.submitShardSyncTask()) {
                            log.warn(
                                    "Failed to submit shard sync task for stream {}. This could be due to the previous pending shard sync task.",
                                    shardSyncTaskManager
                                            .shardDetector()
                                            .streamIdentifier()
                                            .streamName());
                            numSkippedShardSyncTask += 1;
                        } else {
                            log.info(
                                    "Submitted shard sync task for stream {} because of reason {}",
                                    shardSyncTaskManager
                                            .shardDetector()
                                            .streamIdentifier()
                                            .streamName(),
                                    shardSyncResponse.reasonForDecision());
                        }
                    } else {
                        log.info(
                                "Skipping shard sync for {} due to the reason - {}",
                                streamIdentifier,
                                shardSyncResponse.reasonForDecision());
                    }
                }
                isRunSuccess = true;
            } catch (Exception e) {
                log.error("Caught exception while running periodic shard syncer.", e);
            } finally {
                scope.addData(
                        "NumStreamsWithPartialLeases",
                        numStreamsWithPartialLeases,
                        StandardUnit.COUNT,
                        MetricsLevel.SUMMARY);
                scope.addData("NumStreamsToSync", numStreamsToSync, StandardUnit.COUNT, MetricsLevel.SUMMARY);
                scope.addData(
                        "NumSkippedShardSyncTask", numSkippedShardSyncTask, StandardUnit.COUNT, MetricsLevel.SUMMARY);
                MetricsUtil.addSuccessAndLatency(scope, isRunSuccess, runStartMillis, MetricsLevel.SUMMARY);
                scope.end();
            }
        } else {
            log.debug("WorkerId {} is not a leader, not running the shard sync task", workerId);
        }
    }
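
    /*
     * Worked illustration of how the counters above combine in one run, with made-up stream names:
     * if stream A has a complete hash range, stream B has the same hole reported for the third
     * consecutive run (threshold 3), and stream C has no leases yet but its sync task is rejected
     * because a previous sync for C is still pending, then the run emits
     * NumStreamsWithPartialLeases = 1 (B), NumStreamsToSync = 2 (B and C), and
     * NumSkippedShardSyncTask = 1 (C). Stream A is skipped and contributes to no counter.
     */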

    /**
     * Retrieves all the streams under consideration, along with their associated leases.
     *
     * @param streamIdentifiersToFilter streams to restrict the lease listing to
     * @return map of stream identifier to the leases belonging to that stream
     * @throws DependencyException
     * @throws ProvisionedThroughputException
     * @throws InvalidStateException
     */
    private Map<StreamIdentifier, List<Lease>> getStreamToLeasesMap(
            final Set<StreamIdentifier> streamIdentifiersToFilter)
            throws DependencyException, ProvisionedThroughputException, InvalidStateException {
        final List<Lease> leases = leaseRefresher.listLeases();
        if (!isMultiStreamingMode) {
            Validate.isTrue(streamIdentifiersToFilter.size() == 1);
            return Collections.singletonMap(streamIdentifiersToFilter.iterator().next(), leases);
        } else {
            final Map<StreamIdentifier, List<Lease>> streamToLeasesMap = new HashMap<>();
            for (Lease lease : leases) {
                StreamIdentifier streamIdentifier =
                        StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier());
                if (streamIdentifiersToFilter.contains(streamIdentifier)) {
                    streamToLeasesMap
                            .computeIfAbsent(streamIdentifier, s -> new ArrayList<>())
                            .add(lease);
                }
            }
            return streamToLeasesMap;
        }
    }

    /**
     * Given a list of leases for a stream, determine if a shard sync is necessary.
     *
     * @param streamIdentifier stream whose leases are being audited
     * @param leases leases currently held for the stream
     * @return response indicating whether a shard sync should run and why
     */
    @VisibleForTesting
    ShardSyncResponse checkForShardSync(StreamIdentifier streamIdentifier, List<Lease> leases) {
        if (CollectionUtils.isNullOrEmpty(leases)) {
            // If the lease list is null or empty, we need to do a shard sync.
            log.info("No leases found for {}. Will be triggering shard sync", streamIdentifier);
            return new ShardSyncResponse(true, false, "No leases found for " + streamIdentifier);
        }
        // Check if there are any holes in the leases and return the first hole if present.
        Optional<HashRangeHole> hashRangeHoleOpt = hasHoleInLeases(streamIdentifier, leases);
        if (hashRangeHoleOpt.isPresent()) {
            // If a hole is present, check whether the same hole was detected in consecutive previous runs.
            // Only when the hole is confirmed with high confidence do we request a shard sync.
            // The high-confidence factor avoids shard syncs on transient holes during resharding,
            // lease cleanups, or other intermittent issues.
            final HashRangeHoleTracker hashRangeHoleTracker =
                    hashRangeHoleTrackerMap.computeIfAbsent(streamIdentifier, s -> new HashRangeHoleTracker());
            final boolean hasHoleWithHighConfidence =
                    hashRangeHoleTracker.hasHighConfidenceOfHoleWith(hashRangeHoleOpt.get());
            return new ShardSyncResponse(
                    hasHoleWithHighConfidence,
                    true,
                    "Detected same hole for " + hashRangeHoleTracker.getNumConsecutiveHoles()
                            + " times. Shard sync will be initiated when threshold reaches "
                            + leasesRecoveryAuditorInconsistencyConfidenceThreshold);
        } else {
            // If no hole is present, clear any previous tracking for this stream.
            hashRangeHoleTrackerMap.remove(streamIdentifier);
            return new ShardSyncResponse(false, false, "Hash Ranges are complete for " + streamIdentifier);
        }
    }

    /**
     * Object containing metadata about the state of a shard sync.
     */
    @Value
    @Accessors(fluent = true)
    @VisibleForTesting
    static class ShardSyncResponse {

        /**
         * Flag to determine if a shard sync is necessary or not.
         */
        private final boolean shouldDoShardSync;

        /**
         * Flag indicating whether a hole was detected in the lease hash key ranges.
         */
        private final boolean isHoleDetected;

        /**
         * Reason behind the state of the 'shouldDoShardSync' flag.
         */
        private final String reasonForDecision;
    }

    @VisibleForTesting
    Optional<HashRangeHole> hasHoleInLeases(StreamIdentifier streamIdentifier, List<Lease> leases) {
        // Filter the leases with any checkpoint other than shard end.
        List<Lease> activeLeases = leases.stream()
                .filter(lease ->
                        lease.checkpoint() != null && !lease.checkpoint().isShardEnd())
                .collect(Collectors.toList());
        List<Lease> activeLeasesWithHashRanges = fillWithHashRangesIfRequired(streamIdentifier, activeLeases);
        return checkForHoleInHashKeyRanges(streamIdentifier, activeLeasesWithHashRanges);
    }

    // If leases are missing hashranges information, update the leases in-memory as well as in the lease storage
    // by learning from kinesis shards.
    private List<Lease> fillWithHashRangesIfRequired(StreamIdentifier streamIdentifier, List<Lease> activeLeases) {
        List<Lease> activeLeasesWithNoHashRanges = activeLeases.stream()
                .filter(lease -> lease.hashKeyRangeForLease() == null)
                .collect(Collectors.toList());
        Optional<Lease> minLeaseOpt = activeLeasesWithNoHashRanges.stream().min(Comparator.comparing(Lease::leaseKey));
        if (minLeaseOpt.isPresent()) {
            // TODO : use minLease for new ListShards with startingShardId
            final Lease minLease = minLeaseOpt.get();
            final ShardDetector shardDetector = shardSyncTaskManagerProvider
                    .apply(currentStreamConfigMap.get(streamIdentifier))
                    .shardDetector();
            final Map<String, Shard> kinesisShards =
                    shardDetector.listShards().stream().collect(Collectors.toMap(Shard::shardId, shard -> shard));
            return activeLeases.stream()
                    .map(lease -> {
                        if (lease.hashKeyRangeForLease() == null) {
                            final String shardId = lease instanceof MultiStreamLease
                                    ? ((MultiStreamLease) lease).shardId()
                                    : lease.leaseKey();
                            final Shard shard = kinesisShards.get(shardId);
                            if (shard == null) {
                                return lease;
                            }
                            lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange()));
                            try {
                                leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE);
                            } catch (Exception e) {
                                log.warn(
                                        "Unable to update hash range key information for lease {} of stream {}. "
                                                + "This may result in explicit lease sync.",
                                        lease.leaseKey(),
                                        streamIdentifier);
                            }
                        }
                        return lease;
                    })
                    .filter(lease -> lease.hashKeyRangeForLease() != null)
                    .collect(Collectors.toList());
        } else {
            return activeLeases;
        }
    }

    @VisibleForTesting
    static Optional<HashRangeHole> checkForHoleInHashKeyRanges(
            StreamIdentifier streamIdentifier, List<Lease> leasesWithHashKeyRanges) {
        // Sort the hash ranges by starting hash key.
        List<Lease> sortedLeasesWithHashKeyRanges = sortLeasesByHashRange(leasesWithHashKeyRanges);
        if (sortedLeasesWithHashKeyRanges.isEmpty()) {
            log.error("No leases with valid hashranges found for stream {}", streamIdentifier);
            return Optional.of(new HashRangeHole());
        }
        // Validate for hashranges bounds.
        if (!sortedLeasesWithHashKeyRanges
                        .get(0)
                        .hashKeyRangeForLease()
                        .startingHashKey()
                        .equals(MIN_HASH_KEY)
                || !sortedLeasesWithHashKeyRanges
                        .get(sortedLeasesWithHashKeyRanges.size() - 1)
                        .hashKeyRangeForLease()
                        .endingHashKey()
                        .equals(MAX_HASH_KEY)) {
            log.error(
                    "Incomplete hash range found for stream {} between {} and {}.",
                    streamIdentifier,
                    sortedLeasesWithHashKeyRanges.get(0),
                    sortedLeasesWithHashKeyRanges.get(sortedLeasesWithHashKeyRanges.size() - 1));
            return Optional.of(new HashRangeHole(
                    sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease(),
                    sortedLeasesWithHashKeyRanges
                            .get(sortedLeasesWithHashKeyRanges.size() - 1)
                            .hashKeyRangeForLease()));
        }
        // Check for any holes in the sorted hashrange intervals.
        if (sortedLeasesWithHashKeyRanges.size() > 1) {
            Lease leftMostLeaseToReportInCaseOfHole = sortedLeasesWithHashKeyRanges.get(0);
            HashKeyRangeForLease leftLeaseHashRange = leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease();
            for (int i = 1; i < sortedLeasesWithHashKeyRanges.size(); i++) {
                final HashKeyRangeForLease rightLeaseHashRange =
                        sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease();
                final BigInteger rangeDiff =
                        rightLeaseHashRange.startingHashKey().subtract(leftLeaseHashRange.endingHashKey());
                // Case of overlapping leases when the rangediff is 0 or negative.
                // signum() will be -1 for negative and 0 if value is 0.
                // Merge the range for further tracking.
                if (rangeDiff.signum() <= 0) {
                    leftLeaseHashRange = new HashKeyRangeForLease(
                            leftLeaseHashRange.startingHashKey(),
                            leftLeaseHashRange.endingHashKey().max(rightLeaseHashRange.endingHashKey()));
                } else {
                    // Case of non overlapping leases when rangediff is positive. signum() will be 1 for positive.
                    // If rangeDiff is 1, then it is a case of continuous hashrange. If not, it is a hole.
                    if (!rangeDiff.equals(BigInteger.ONE)) {
                        log.error(
                                "Incomplete hash range found for {} between {} and {}.",
                                streamIdentifier,
                                leftMostLeaseToReportInCaseOfHole,
                                sortedLeasesWithHashKeyRanges.get(i));
                        return Optional.of(new HashRangeHole(
                                leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(),
                                sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease()));
                    }
                    leftMostLeaseToReportInCaseOfHole = sortedLeasesWithHashKeyRanges.get(i);
                    leftLeaseHashRange = rightLeaseHashRange;
                }
            }
        }
        return Optional.empty();
    }

    @VisibleForTesting
    static List<Lease> sortLeasesByHashRange(List<Lease> leasesWithHashKeyRanges) {
        if (leasesWithHashKeyRanges.size() == 0 || leasesWithHashKeyRanges.size() == 1) {
            return leasesWithHashKeyRanges;
        }
        Collections.sort(leasesWithHashKeyRanges, new HashKeyRangeComparator());
        return leasesWithHashKeyRanges;
    }

    @Value
    private static class HashRangeHole {
        HashRangeHole() {
            hashRangeAtStartOfPossibleHole = hashRangeAtEndOfPossibleHole = null;
        }

        HashRangeHole(
                HashKeyRangeForLease hashRangeAtStartOfPossibleHole,
                HashKeyRangeForLease hashRangeAtEndOfPossibleHole) {
            this.hashRangeAtStartOfPossibleHole = hashRangeAtStartOfPossibleHole;
            this.hashRangeAtEndOfPossibleHole = hashRangeAtEndOfPossibleHole;
        }

        private final HashKeyRangeForLease hashRangeAtStartOfPossibleHole;
        private final HashKeyRangeForLease hashRangeAtEndOfPossibleHole;
    }

    private class HashRangeHoleTracker {
        private HashRangeHole hashRangeHole;

        @Getter
        private Integer numConsecutiveHoles;

        public boolean hasHighConfidenceOfHoleWith(@NonNull HashRangeHole hashRangeHole) {
            if (hashRangeHole.equals(this.hashRangeHole)) {
                ++this.numConsecutiveHoles;
            } else {
                this.hashRangeHole = hashRangeHole;
                this.numConsecutiveHoles = 1;
            }
            return numConsecutiveHoles >= leasesRecoveryAuditorInconsistencyConfidenceThreshold;
        }
    }

    /**
     * Helper class to compare leases based on their hash range.
     */
    private static class HashKeyRangeComparator implements Comparator<Lease>, Serializable {

        private static final long serialVersionUID = 1L;

        @Override
        public int compare(Lease lease, Lease otherLease) {
            Validate.notNull(lease);
            Validate.notNull(otherLease);
            Validate.notNull(lease.hashKeyRangeForLease());
            Validate.notNull(otherLease.hashKeyRangeForLease());
            return ComparisonChain.start()
                    .compare(
                            lease.hashKeyRangeForLease().startingHashKey(),
                            otherLease.hashKeyRangeForLease().startingHashKey())
                    .compare(
                            lease.hashKeyRangeForLease().endingHashKey(),
                            otherLease.hashKeyRangeForLease().endingHashKey())
                    .result();
        }
    }
}
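
The gap test in checkForHoleInHashKeyRanges above reduces to interval arithmetic on the sorted hash key ranges: adjacent ranges must differ by exactly one, overlaps are merged, and any larger positive difference is a hole. The following standalone sketch replays just that arithmetic on plain BigInteger pairs; it is an illustration only, with made-up range values, and omits the MIN/MAX boundary validation and overlap merging that the real method performs.

import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;

public class HashRangeGapSketch {
    public static void main(String[] args) {
        // Sorted [startingHashKey, endingHashKey] pairs; keys 200..249 are covered by no lease.
        List<BigInteger[]> ranges = Arrays.asList(range(0, 99), range(100, 199), range(250, 399));
        for (int i = 1; i < ranges.size(); i++) {
            BigInteger diff = ranges.get(i)[0].subtract(ranges.get(i - 1)[1]);
            // Contiguous ranges differ by exactly one; a positive difference greater than one is a hole.
            if (diff.signum() > 0 && !diff.equals(BigInteger.ONE)) {
                System.out.println("Hole between " + ranges.get(i - 1)[1] + " and " + ranges.get(i)[0]);
            }
        }
    }

    private static BigInteger[] range(long start, long end) {
        return new BigInteger[] {BigInteger.valueOf(start), BigInteger.valueOf(end)};
    }
}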

@@ -1,48 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;

@Getter
@ToString
@Slf4j
@KinesisClientInternalApi
class RejectedTaskEvent implements DiagnosticEvent {
    private static final String MESSAGE = "Review your thread configuration to prevent task rejections. "
            + "Task rejections will slow down your application and some shards may stop processing. ";

    private ExecutorStateEvent executorStateEvent;
    private Throwable throwable;

    RejectedTaskEvent(ExecutorStateEvent executorStateEvent, Throwable throwable) {
        this.executorStateEvent = executorStateEvent;
        this.throwable = throwable;
    }

    @Override
    public void accept(DiagnosticEventHandler visitor) {
        visitor.visit(this);
    }

    @Override
    public String message() {
        return MESSAGE + executorStateEvent.message();
    }
}

File diff suppressed because it is too large

@@ -1,64 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.Data;
import lombok.NonNull;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
import software.amazon.kinesis.leases.ShardInfo;
import software.amazon.kinesis.processor.Checkpointer;

/**
 * CoordinatorFactory implementation used by the Scheduler.
 */
@Data
@KinesisClientInternalApi
public class SchedulerCoordinatorFactory implements CoordinatorFactory {
    /**
     * {@inheritDoc}
     */
    @Override
    public ExecutorService createExecutorService() {
        return new SchedulerThreadPoolExecutor(new ThreadFactoryBuilder()
                .setNameFormat("ShardRecordProcessor-%04d")
                .build());
    }

    static class SchedulerThreadPoolExecutor extends ThreadPoolExecutor {
        private static final long DEFAULT_KEEP_ALIVE = 60L;

        SchedulerThreadPoolExecutor(ThreadFactory threadFactory) {
            super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, new SynchronousQueue<>(), threadFactory);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer(
            @NonNull final ShardInfo shardInfo, @NonNull final Checkpointer checkpoint) {
        return new ShardRecordProcessorCheckpointer(shardInfo, checkpoint);
    }
}
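
SchedulerThreadPoolExecutor above (zero core threads, unbounded maximum, 60-second keep-alive, SynchronousQueue hand-off) has the same shape as a JDK cached thread pool. The sketch below builds an equivalent pool with the same thread-name pattern using a plain ThreadFactory instead of Guava's ThreadFactoryBuilder; it is illustrative only and not part of the library.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class CachedPoolSketch {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger();
        ThreadFactory namedFactory = runnable ->
                new Thread(runnable, String.format("ShardRecordProcessor-%04d", counter.getAndIncrement()));
        // newCachedThreadPool = core 0, max Integer.MAX_VALUE, 60s keep-alive, SynchronousQueue hand-off.
        ExecutorService pool = Executors.newCachedThreadPool(namedFactory);
        pool.submit(() -> System.out.println("running on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}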

@@ -1,33 +0,0 @@
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator;

/**
 * A listener for callbacks on changes in worker state.
 */
@FunctionalInterface
public interface WorkerStateChangeListener {
    enum WorkerState {
        CREATED,
        INITIALIZING,
        STARTED,
        SHUT_DOWN_STARTED,
        SHUT_DOWN
    }

    void onWorkerStateChange(WorkerState newState);

    default void onAllInitializationAttemptsFailed(Throwable e) {}
}
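
Because WorkerStateChangeListener is a @FunctionalInterface and onAllInitializationAttemptsFailed has a default no-op body, a lambda is enough to implement it. A minimal usage sketch follows; how the listener is handed to the coordinator configuration depends on your application wiring and is not shown here.

import software.amazon.kinesis.coordinator.WorkerStateChangeListener;
import software.amazon.kinesis.coordinator.WorkerStateChangeListener.WorkerState;

public class WorkerStateLoggerSketch {
    public static void main(String[] args) {
        // A lambda satisfies the single abstract method onWorkerStateChange.
        WorkerStateChangeListener listener = newState -> System.out.println("Worker moved to state: " + newState);
        listener.onWorkerStateChange(WorkerState.STARTED);
        listener.onWorkerStateChange(WorkerState.SHUT_DOWN);
    }
}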

@@ -1,38 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator.assignment;

import java.util.List;

import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.leases.Lease;

@KinesisClientInternalApi
public interface LeaseAssignmentDecider {

    /**
     * Assigns expiredOrUnAssignedLeases to the available workers.
     */
    void assignExpiredOrUnassignedLeases(final List<Lease> expiredOrUnAssignedLeases);

    /**
     * Balances the leases between workers in the fleet.
     * Implementations can choose to balance leases based on lease count, on throughput, or by minimizing the
     * variance in resource utilization across workers.
     * See the documentation of the implementing class for how it balances the leases.
     */
    void balanceWorkerVariance();
}
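
To make the LeaseAssignmentDecider contract concrete, here is a hypothetical, deliberately naive round-robin implementation. It only records intended owners in a local map rather than writing to the lease table, and its empty balanceWorkerVariance() stands in for what a real implementation such as VarianceBasedLeaseAssignmentDecider does; none of this sketch is part of the library.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import software.amazon.kinesis.coordinator.assignment.LeaseAssignmentDecider;
import software.amazon.kinesis.leases.Lease;

public class RoundRobinDeciderSketch implements LeaseAssignmentDecider {
    private final List<String> workers;
    private final Map<String, String> intendedOwners = new HashMap<>();
    private int next;

    public RoundRobinDeciderSketch(List<String> workers) {
        this.workers = workers;
    }

    @Override
    public void assignExpiredOrUnassignedLeases(List<Lease> expiredOrUnAssignedLeases) {
        // Hand each expired or unowned lease to the next worker in a fixed rotation.
        for (Lease lease : expiredOrUnAssignedLeases) {
            intendedOwners.put(lease.leaseKey(), workers.get(next++ % workers.size()));
        }
    }

    @Override
    public void balanceWorkerVariance() {
        // A production implementation would move leases between workers here to even out load;
        // the round-robin sketch has nothing to rebalance.
    }

    public Map<String, String> intendedOwners() {
        return intendedOwners;
    }
}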
@@ -1,722 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright 2024 Amazon.com, Inc. or its affiliates.
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package software.amazon.kinesis.coordinator.assignment;
|
|
||||||
|
|
||||||
import java.time.Duration;
|
|
||||||
import java.time.Instant;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.HashSet;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Objects;
|
|
||||||
import java.util.Optional;
|
|
||||||
import java.util.Set;
|
|
||||||
import java.util.concurrent.Callable;
|
|
||||||
import java.util.concurrent.CompletableFuture;
|
|
||||||
import java.util.concurrent.CompletionException;
|
|
||||||
import java.util.concurrent.ExecutorService;
|
|
||||||
import java.util.concurrent.Executors;
|
|
||||||
import java.util.concurrent.Future;
|
|
||||||
import java.util.concurrent.ScheduledExecutorService;
|
|
||||||
import java.util.concurrent.TimeUnit;
|
|
||||||
import java.util.concurrent.atomic.AtomicInteger;
|
|
||||||
import java.util.function.Function;
|
|
||||||
import java.util.function.Supplier;
|
|
||||||
import java.util.stream.Collectors;
|
|
||||||
|
|
||||||
import com.google.common.collect.ImmutableList;
|
|
||||||
import com.google.common.collect.ImmutableMap;
|
|
||||||
import lombok.Getter;
|
|
||||||
import lombok.RequiredArgsConstructor;
|
|
||||||
import lombok.extern.slf4j.Slf4j;
|
|
||||||
import org.apache.commons.collections.CollectionUtils;
|
|
||||||
import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
|
|
||||||
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
|
|
||||||
import software.amazon.kinesis.coordinator.LeaderDecider;
|
|
||||||
import software.amazon.kinesis.leases.Lease;
|
|
||||||
import software.amazon.kinesis.leases.LeaseManagementConfig;
|
|
||||||
import software.amazon.kinesis.leases.LeaseRefresher;
|
|
||||||
import software.amazon.kinesis.leases.exceptions.DependencyException;
|
|
||||||
import software.amazon.kinesis.leases.exceptions.InvalidStateException;
|
|
||||||
import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsFactory;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsLevel;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsScope;
|
|
||||||
import software.amazon.kinesis.metrics.MetricsUtil;
|
|
||||||
import software.amazon.kinesis.metrics.NullMetricsScope;
|
|
||||||
import software.amazon.kinesis.worker.metricstats.WorkerMetricStats;
|
|
||||||
import software.amazon.kinesis.worker.metricstats.WorkerMetricStatsDAO;
|
|
||||||
|
|
||||||
import static java.util.Objects.isNull;
|
|
||||||
import static java.util.Objects.nonNull;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Performs the LeaseAssignment for the application. This starts by loading the leases and workerMetrics from the
|
|
||||||
* storage and then starts by assignment (in-memory) of expired and/or unassigned leases after which it tries to perform
|
|
||||||
* balancing of load among the workers by re-assign leases.
|
|
||||||
* In the end, performs actual assignment by writing to storage.
|
|
||||||
*/
|
|
||||||
@Slf4j
|
|
||||||
@RequiredArgsConstructor
|
|
||||||
@KinesisClientInternalApi
|
|
||||||
public final class LeaseAssignmentManager {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Default number of continuous failure execution after which leadership is released.
|
|
||||||
*/
|
|
||||||
private static final int DEFAULT_FAILURE_COUNT_TO_SWITCH_LEADER = 3;
|
|
||||||
|
|
||||||
private static final String FORCE_LEADER_RELEASE_METRIC_NAME = "ForceLeaderRelease";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Default retry attempt for loading leases and workers before giving up.
|
|
||||||
*/
|
|
||||||
private static final int DDB_LOAD_RETRY_ATTEMPT = 1;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Internal threadpool used to parallely perform assignment operation by calling storage.
|
|
||||||
*/
|
|
||||||
private static final ExecutorService LEASE_ASSIGNMENT_CALL_THREAD_POOL =
|
|
||||||
Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
|
|
||||||
|
|
||||||
private static final String METRICS_LEASE_ASSIGNMENT_MANAGER = "LeaseAssignmentManager";
|
|
||||||
private static final String METRICS_INCOMPLETE_EXPIRED_LEASES_ASSIGNMENT =
|
|
||||||
"LeaseAssignmentManager.IncompleteExpiredLeasesAssignment";
|
|
||||||
public static final int DEFAULT_NO_OF_SKIP_STAT_FOR_DEAD_WORKER_THRESHOLD = 2;
|
|
||||||
|
|
||||||
private final LeaseRefresher leaseRefresher;
|
|
||||||
private final WorkerMetricStatsDAO workerMetricsDAO;
|
|
||||||
private final LeaderDecider leaderDecider;
|
|
||||||
private final LeaseManagementConfig.WorkerUtilizationAwareAssignmentConfig config;
|
|
||||||
private final String currentWorkerId;
|
|
||||||
private final Long leaseDurationMillis;
|
|
||||||
private final MetricsFactory metricsFactory;
|
|
||||||
private final ScheduledExecutorService executorService;
|
|
||||||
private final Supplier<Long> nanoTimeProvider;
|
|
||||||
private final int maxLeasesForWorker;
|
|
||||||
private final LeaseManagementConfig.GracefulLeaseHandoffConfig gracefulLeaseHandoffConfig;
|
|
||||||
private boolean tookOverLeadershipInThisRun = false;
|
|
||||||
private final Map<String, Lease> prevRunLeasesState = new HashMap<>();
|
|
||||||
private final long leaseAssignmentIntervalMillis;
|
|
||||||
|
|
||||||
private Future<?> managerFuture;
|
|
||||||
|
|
||||||
private int noOfContinuousFailedAttempts = 0;
|
|
||||||
private int lamRunCounter = 0;
|
|
||||||
|
|
||||||
public synchronized void start() {
|
|
||||||
if (isNull(managerFuture)) {
|
|
||||||
// LAM can be dynamically started/stopped and restarted during MigrationStateMachine execution
|
|
||||||
// so reset the flag to refresh the state before processing during a restart of LAM.
|
|
||||||
tookOverLeadershipInThisRun = false;
|
|
||||||
managerFuture = executorService.scheduleWithFixedDelay(
|
|
||||||
this::performAssignment, 0L, (int) (leaseAssignmentIntervalMillis), TimeUnit.MILLISECONDS);
|
|
||||||
log.info("Started LeaseAssignmentManager");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
log.info("LeaseAssignmentManager already running...");
|
|
||||||
}
|
|
||||||
|
|
||||||
public synchronized void stop() {
|
|
||||||
if (nonNull(managerFuture)) {
|
|
||||||
log.info("Completed shutdown of LeaseAssignmentManager");
|
|
||||||
managerFuture.cancel(true);
|
|
||||||
managerFuture = null;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
log.info("LeaseAssignmentManager is not running...");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates the MetricsScope for given {@param operation} by calling metricsFactory and falls back to
|
|
||||||
* NullMetricsScope if failed to create MetricsScope.
|
|
||||||
* @param operation Operation name for MetricsScope
|
|
||||||
* @return instance of MetricsScope
|
|
||||||
*/
|
|
||||||
private MetricsScope createMetricsScope(final String operation) {
|
|
||||||
try {
|
|
||||||
return MetricsUtil.createMetricsWithOperation(metricsFactory, operation);
|
|
||||||
} catch (final Exception e) {
|
|
||||||
log.error("Failed to create metrics scope defaulting to no metrics.", e);
|
|
||||||
return new NullMetricsScope();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private void performAssignment() {
|
|
||||||
|
|
||||||
final MetricsScope metricsScope = createMetricsScope(METRICS_LEASE_ASSIGNMENT_MANAGER);
|
|
||||||
final long startTime = System.currentTimeMillis();
|
|
||||||
boolean success = false;
|
|
||||||
|
|
||||||
try {
|
|
||||||
|
|
||||||
// If the current worker is not leader, then do nothing as assignment is executed on leader.
|
|
||||||
if (!leaderDecider.isLeader(currentWorkerId)) {
|
|
||||||
log.info("Current worker {} is not a leader, ignore", currentWorkerId);
|
|
||||||
this.tookOverLeadershipInThisRun = false;
|
|
||||||
success = true;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!this.tookOverLeadershipInThisRun) {
|
|
||||||
// This means that there was leader change, perform cleanup of state as this is leader switch.
|
|
||||||
this.tookOverLeadershipInThisRun = true;
|
|
||||||
this.lamRunCounter = 0;
|
|
||||||
prepareAfterLeaderSwitch();
|
|
||||||
}
|
|
||||||
log.info("Current worker {} is a leader, performing assignment", currentWorkerId);
|
|
||||||
|
|
||||||
final InMemoryStorageView inMemoryStorageView = new InMemoryStorageView();
|
|
||||||
|
|
||||||
final long loadStartTime = System.currentTimeMillis();
|
|
||||||
inMemoryStorageView.loadInMemoryStorageView(metricsScope);
|
|
||||||
MetricsUtil.addLatency(metricsScope, "LeaseAndWorkerMetricsLoad", loadStartTime, MetricsLevel.DETAILED);
|
|
||||||
|
|
||||||
publishLeaseAndWorkerCountMetrics(metricsScope, inMemoryStorageView);
|
|
||||||
final LeaseAssignmentDecider leaseAssignmentDecider = new VarianceBasedLeaseAssignmentDecider(
|
|
||||||
inMemoryStorageView,
|
|
||||||
config.dampeningPercentage(),
|
|
||||||
config.reBalanceThresholdPercentage(),
|
|
||||||
config.allowThroughputOvershoot());
|
|
||||||
|
|
||||||
updateLeasesLastCounterIncrementNanosAndLeaseShutdownTimeout(
|
|
||||||
inMemoryStorageView.getLeaseList(), inMemoryStorageView.getLeaseTableScanTime());
|
|
||||||
|
|
||||||
// This does not include the leases from the worker that has expired (based on WorkerMetricStats's
|
|
||||||
// lastUpdateTime)
|
|
||||||
// but the lease is not expired (based on the leaseCounter on lease).
|
|
||||||
// If a worker has died, the lease will be expired and assigned in next iteration.
|
|
||||||
final List<Lease> expiredOrUnAssignedLeases = inMemoryStorageView.getLeaseList().stream()
|
|
||||||
.filter(lease -> lease.isExpired(
|
|
||||||
TimeUnit.MILLISECONDS.toNanos(leaseDurationMillis),
|
|
||||||
inMemoryStorageView.getLeaseTableScanTime())
|
|
||||||
|| Objects.isNull(lease.actualOwner()))
|
|
||||||
// marking them for direct reassignment.
|
|
||||||
.map(l -> l.isExpiredOrUnassigned(true))
|
|
||||||
.collect(Collectors.toList());
|
|
||||||
|
|
||||||
log.info("Total expiredOrUnassignedLeases count : {}", expiredOrUnAssignedLeases.size());
|
|
||||||
metricsScope.addData(
|
|
||||||
"ExpiredLeases", expiredOrUnAssignedLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
|
|
||||||
|
|
||||||
final long expiredAndUnassignedLeaseAssignmentStartTime = System.currentTimeMillis();
|
|
||||||
leaseAssignmentDecider.assignExpiredOrUnassignedLeases(expiredOrUnAssignedLeases);
|
|
||||||
MetricsUtil.addLatency(
|
|
||||||
metricsScope,
|
|
||||||
"AssignExpiredOrUnassignedLeases",
|
|
||||||
expiredAndUnassignedLeaseAssignmentStartTime,
|
|
||||||
MetricsLevel.DETAILED);
|
|
||||||
|
|
||||||
if (!expiredOrUnAssignedLeases.isEmpty()) {
|
|
||||||
// When expiredOrUnAssignedLeases is not empty, that means
|
|
||||||
// that we were not able to assign all expired or unassigned leases and hit the maxThroughput
|
|
||||||
// per worker for all workers.
|
|
||||||
log.warn("Not able to assign all expiredOrUnAssignedLeases");
|
|
||||||
metricsScope.addData(
|
|
||||||
"LeaseSpillover", expiredOrUnAssignedLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (shouldRunVarianceBalancing()) {
|
|
||||||
final long balanceWorkerVarianceStartTime = System.currentTimeMillis();
|
|
||||||
final int totalNewAssignmentBeforeWorkerVarianceBalancing =
|
|
||||||
inMemoryStorageView.leaseToNewAssignedWorkerMap.size();
|
|
||||||
leaseAssignmentDecider.balanceWorkerVariance();
|
|
||||||
MetricsUtil.addLatency(
|
|
||||||
metricsScope, "BalanceWorkerVariance", balanceWorkerVarianceStartTime, MetricsLevel.DETAILED);
|
|
||||||
metricsScope.addData(
|
|
||||||
"NumOfLeasesReassignment",
|
|
||||||
inMemoryStorageView.leaseToNewAssignedWorkerMap.size()
|
|
||||||
- totalNewAssignmentBeforeWorkerVarianceBalancing,
|
|
||||||
StandardUnit.COUNT,
|
|
||||||
MetricsLevel.SUMMARY);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (inMemoryStorageView.leaseToNewAssignedWorkerMap.isEmpty()) {
|
|
||||||
log.info("No new lease assignment performed in this iteration");
|
|
||||||
}
|
|
||||||
|
|
||||||
parallelyAssignLeases(inMemoryStorageView, metricsScope);
|
|
||||||
printPerWorkerLeases(inMemoryStorageView);
|
|
||||||
deleteStaleWorkerMetricsEntries(inMemoryStorageView, metricsScope);
|
|
||||||
success = true;
|
|
||||||
noOfContinuousFailedAttempts = 0;
|
|
||||||
} catch (final Exception e) {
|
|
||||||
log.error("LeaseAssignmentManager failed to perform lease assignment.", e);
|
|
||||||
noOfContinuousFailedAttempts++;
|
|
||||||
if (noOfContinuousFailedAttempts >= DEFAULT_FAILURE_COUNT_TO_SWITCH_LEADER) {
|
|
||||||
log.error(
|
|
||||||
"Failed to perform assignment {} times in a row, releasing leadership from worker : {}",
|
|
||||||
DEFAULT_FAILURE_COUNT_TO_SWITCH_LEADER,
|
|
||||||
currentWorkerId);
|
|
||||||
MetricsUtil.addCount(metricsScope, FORCE_LEADER_RELEASE_METRIC_NAME, 1, MetricsLevel.SUMMARY);
|
|
||||||
leaderDecider.releaseLeadershipIfHeld();
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
MetricsUtil.addSuccessAndLatency(metricsScope, success, startTime, MetricsLevel.SUMMARY);
|
|
||||||
MetricsUtil.endScope(metricsScope);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private boolean shouldRunVarianceBalancing() {
|
|
||||||
final boolean response = this.lamRunCounter == 0;
|
|
||||||
/*
|
|
||||||
To avoid lamRunCounter grow large, keep it within [0,varianceBalancingFrequency).
|
|
||||||
If varianceBalancingFrequency is 5 lamRunCounter value will be within 0 to 4 and method return true when
|
|
||||||
lamRunCounter is 0.
|
|
||||||
*/
|
|
||||||
this.lamRunCounter = (this.lamRunCounter + 1) % config.varianceBalancingFrequency();
|
|
||||||
return response;
|
|
||||||
}
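
    /*
     * Worked illustration with assumed values: if varianceBalancingFrequency is 5, successive calls
     * return true, false, false, false, false, true, ... so variance balancing runs on every fifth
     * LeaseAssignmentManager iteration, while expired or unassigned leases are still handled on every run.
     */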
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Deletes the WorkerMetricStats entries which are stale(not updated since long time, ref
|
|
||||||
* {@link LeaseAssignmentManager#isWorkerMetricsEntryStale} for the condition to evaluate staleness)
|
|
||||||
*/
|
|
||||||
private void deleteStaleWorkerMetricsEntries(
|
|
||||||
final InMemoryStorageView inMemoryStorageView, final MetricsScope metricsScope) {
|
|
||||||
final long startTime = System.currentTimeMillis();
|
|
||||||
try {
|
|
||||||
final List<WorkerMetricStats> staleWorkerMetricsList = inMemoryStorageView.getWorkerMetricsList().stream()
|
|
||||||
.filter(this::isWorkerMetricsEntryStale)
|
|
||||||
.collect(Collectors.toList());
|
|
||||||
MetricsUtil.addCount(
|
|
||||||
metricsScope, "TotalStaleWorkerMetricsEntry", staleWorkerMetricsList.size(), MetricsLevel.DETAILED);
|
|
||||||
log.info("Number of stale workerMetrics entries : {}", staleWorkerMetricsList.size());
|
|
||||||
log.info("Stale workerMetrics list : {}", staleWorkerMetricsList);
|
|
||||||
|
|
||||||
final List<CompletableFuture<Boolean>> completableFutures = staleWorkerMetricsList.stream()
|
|
||||||
.map(workerMetrics -> CompletableFuture.supplyAsync(
|
|
||||||
() -> workerMetricsDAO.deleteMetrics(workerMetrics), LEASE_ASSIGNMENT_CALL_THREAD_POOL))
|
|
||||||
.collect(Collectors.toList());
|
|
||||||
|
|
||||||
CompletableFuture.allOf(completableFutures.toArray(new CompletableFuture[0]))
|
|
||||||
.join();
|
|
||||||
} finally {
|
|
||||||
MetricsUtil.addLatency(metricsScope, "StaleWorkerMetricsCleanup", startTime, MetricsLevel.DETAILED);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* WorkerMetricStats entry is considered stale if the lastUpdateTime of the workerMetrics is older than
|
|
||||||
* workerMetricsStalenessThreshold * workerMetricsReporterFreqInMillis.
|
|
||||||
*/
|
|
||||||
private boolean isWorkerMetricsEntryStale(final WorkerMetricStats workerMetrics) {
|
|
||||||
return Duration.between(Instant.ofEpochSecond(workerMetrics.getLastUpdateTime()), Instant.now())
|
|
||||||
.toMillis()
|
|
||||||
> config.staleWorkerMetricsEntryCleanupDuration().toMillis();
|
|
||||||
}
|
|
||||||
|
|
||||||
private void printPerWorkerLeases(final InMemoryStorageView storageView) {
|
|
||||||
storageView.getActiveWorkerIdSet().forEach(activeWorkerId -> {
|
|
||||||
log.info(
|
|
||||||
"Worker : {} and total leases : {} and totalThroughput : {}",
|
|
||||||
activeWorkerId,
|
|
||||||
Optional.ofNullable(storageView.getWorkerToLeasesMap().get(activeWorkerId))
|
|
||||||
.orElse(Collections.EMPTY_SET)
|
|
||||||
.size(),
|
|
||||||
storageView.getWorkerToTotalAssignedThroughputMap().get(activeWorkerId));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
private void parallelyAssignLeases(final InMemoryStorageView inMemoryStorageView, final MetricsScope metricsScope) {
|
|
||||||
final AtomicInteger failedAssignmentCounter = new AtomicInteger(0);
|
|
||||||
final long startTime = System.currentTimeMillis();
|
|
||||||
boolean success = false;
|
|
||||||
try {
|
|
||||||
CompletableFuture.allOf(inMemoryStorageView.getLeaseToNewAssignedWorkerMap().entrySet().stream()
|
|
||||||
// ignore leases that are heartbeating and pending graceful shutdown checkpoint.
|
|
||||||
.filter(entry -> !entry.getKey().blockedOnPendingCheckpoint(getNanoTimeMillis()))
|
|
||||||
.map(entry -> CompletableFuture.supplyAsync(
|
|
||||||
() -> {
|
|
||||||
try {
|
|
||||||
final Lease lease = entry.getKey();
|
|
||||||
if (gracefulLeaseHandoffConfig.isGracefulLeaseHandoffEnabled()
|
|
||||||
&& lease.isEligibleForGracefulShutdown()) {
|
|
||||||
return handleGracefulLeaseHandoff(
|
|
||||||
lease, entry.getValue(), failedAssignmentCounter);
|
|
||||||
} else {
|
|
||||||
return handleRegularLeaseAssignment(
|
|
||||||
lease, entry.getValue(), failedAssignmentCounter);
|
|
||||||
}
|
|
||||||
} catch (Exception e) {
|
|
||||||
throw new CompletionException(e);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
LEASE_ASSIGNMENT_CALL_THREAD_POOL))
|
|
||||||
.toArray(CompletableFuture[]::new))
|
|
||||||
.join();
|
|
||||||
success = true;
|
|
||||||
} finally {
|
|
||||||
MetricsUtil.addCount(
|
|
||||||
metricsScope, "FailedAssignmentCount", failedAssignmentCounter.get(), MetricsLevel.DETAILED);
|
|
||||||
MetricsUtil.addSuccessAndLatency(
|
|
||||||
metricsScope, "ParallelyAssignLeases", success, startTime, MetricsLevel.DETAILED);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private boolean handleGracefulLeaseHandoff(Lease lease, String newOwner, AtomicInteger failedAssignmentCounter)
|
|
||||||
throws ProvisionedThroughputException, InvalidStateException, DependencyException {
|
|
||||||
final boolean response = leaseRefresher.initiateGracefulLeaseHandoff(lease, newOwner);
|
|
||||||
if (response) {
|
|
||||||
// new handoff assignment. add the timeout.
|
|
||||||
lease.checkpointOwnerTimeoutTimestampMillis(getCheckpointOwnerTimeoutTimestampMillis());
|
|
||||||
} else {
|
|
||||||
failedAssignmentCounter.incrementAndGet();
|
|
||||||
}
|
|
||||||
return response;
|
|
||||||
}
|
|
||||||
|
|
||||||
private boolean handleRegularLeaseAssignment(Lease lease, String newOwner, AtomicInteger failedAssignmentCounter)
|
|
||||||
throws ProvisionedThroughputException, InvalidStateException, DependencyException {
|
|
||||||
final boolean response = leaseRefresher.assignLease(lease, newOwner);
|
|
||||||
if (response) {
|
|
||||||
// Successful assignment updates the leaseCounter, update the nanoTime for counter update.
|
|
||||||
lease.lastCounterIncrementNanos(nanoTimeProvider.get());
|
|
||||||
} else {
|
|
||||||
failedAssignmentCounter.incrementAndGet();
|
|
||||||
}
|
|
||||||
return response;
|
|
||||||
}
|
|
||||||
|
|
||||||
private void publishLeaseAndWorkerCountMetrics(
|
|
||||||
final MetricsScope metricsScope, final InMemoryStorageView inMemoryStorageView) {
|
|
||||||
// Names of the metrics are kept in sync with what is published in LeaseTaker.
|
|
||||||
metricsScope.addData(
|
|
||||||
"TotalLeases", inMemoryStorageView.leaseList.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
|
|
||||||
metricsScope.addData(
|
|
||||||
"NumWorkers", inMemoryStorageView.activeWorkerMetrics.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Method updates all new leases with currentTime if the counter is updated since last run else keeps whatever
|
|
||||||
// was prev and update the prevRunLeasesState
|
|
||||||
private void updateLeasesLastCounterIncrementNanosAndLeaseShutdownTimeout(
|
|
||||||
final List<Lease> leaseList, final Long scanTime) {
|
|
||||||
for (final Lease lease : leaseList) {
|
|
||||||
final Lease prevLease = prevRunLeasesState.get(lease.leaseKey());
|
|
||||||
|
|
||||||
// make sure lease shutdown timeouts are tracked.
|
|
||||||
if (lease.shutdownRequested()) {
|
|
||||||
// previous and current leases might have same next and checkpoint owners but there is no
|
|
||||||
// guarantee that the latest shutdown is the same shutdown in the previous lease for example
|
|
||||||
// some other leaders change the lease states while this worker waiting for it's LAM run.
|
|
||||||
// This is the best effort to prevent marking the incorrect timeout.
|
|
||||||
if (isNull(prevLease) || !prevLease.shutdownRequested() || !isSameOwners(lease, prevLease)) {
|
|
||||||
// Add new value if previous is null, previous lease is not shutdown pending or the owners
|
|
||||||
// don't match
|
|
||||||
lease.checkpointOwnerTimeoutTimestampMillis(getCheckpointOwnerTimeoutTimestampMillis());
|
|
||||||
} else {
|
|
||||||
lease.checkpointOwnerTimeoutTimestampMillis(prevLease.checkpointOwnerTimeoutTimestampMillis());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (isNull(prevLease)) {
|
|
||||||
lease.lastCounterIncrementNanos(
|
|
||||||
isNull(lease.actualOwner())
|
|
||||||
// This is an unassigned lease, mark as 0L that puts this in first in assignment order
|
|
||||||
? 0L
|
|
||||||
: scanTime);
|
|
||||||
} else {
|
|
||||||
lease.lastCounterIncrementNanos(
|
|
||||||
lease.leaseCounter() > prevLease.leaseCounter()
|
|
||||||
? scanTime
|
|
||||||
: prevLease.lastCounterIncrementNanos());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
prevRunLeasesState.clear();
|
|
||||||
prevRunLeasesState.putAll(leaseList.stream().collect(Collectors.toMap(Lease::leaseKey, Function.identity())));
|
|
||||||
}
|
|
||||||
|
|
||||||
private void prepareAfterLeaderSwitch() {
|
|
||||||
prevRunLeasesState.clear();
|
|
||||||
noOfContinuousFailedAttempts = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* In memory view of the leases and workerMetrics.
|
|
||||||
* This class supports queries (e.g., leases assigned to worker or total throughout assigned to worker).
|
|
||||||
*/
|
|
||||||
@Getter
|
|
||||||
class InMemoryStorageView {
|
|
||||||
|
|
||||||
// This is in-memory view of the workerToLeaseMapping, this is updated in-memory before actual
|
|
||||||
// changes to storage.
|
|
||||||
private final Map<String, Set<Lease>> workerToLeasesMap = new HashMap<>();
|
|
||||||
/**
|
|
||||||
* This is computed initially after the loading leases and then updated when the
|
|
||||||
* {@link InMemoryStorageView#performLeaseAssignment} is called.
|
|
||||||
*/
|
|
||||||
private final Map<String, Double> workerToTotalAssignedThroughputMap = new HashMap<>();
|
|
||||||
/**
|
|
||||||
* Captures the new assignment done during the lifecycle of single run.
|
|
||||||
*/
|
|
||||||
private final Map<Lease, String> leaseToNewAssignedWorkerMap = new HashMap<>();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* List of all leases in the application.
|
|
||||||
*/
|
|
||||||
private List<Lease> leaseList;
|
|
||||||
/**
|
|
||||||
* List of workers which are active (i.e., updated metric stats before the threshold ref)
|
|
||||||
* {@link this#computeWorkerExpiryThresholdInSecond})
|
|
||||||
*/
|
|
||||||
private List<WorkerMetricStats> activeWorkerMetrics;
|
|
||||||
/**
|
|
||||||
* List of all workerMetrics entries from storage.
|
|
||||||
*/
|
|
||||||
private List<WorkerMetricStats> workerMetricsList;
|
|
||||||
/**
|
|
||||||
* List of active workers ids.
|
|
||||||
*/
|
|
||||||
private Set<String> activeWorkerIdSet;
|
|
||||||
/**
|
|
||||||
* Wall time in nanoseconds when the lease table scan was completed.
|
|
||||||
*/
|
|
||||||
private long leaseTableScanTime = 0L;
|
|
||||||
/**
|
|
||||||
* Average throughput for all workers.
|
|
||||||
*/
|
|
||||||
private double targetAverageThroughput;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Update {@ref inMemoryWorkerToLeasesMapping} with the change in ownership and update newLeaseAssignmentMap
|
|
||||||
*
|
|
||||||
* @param lease lease changing assignment
|
|
||||||
* @param newOwner new owner of the lease
|
|
||||||
*/
|
|
||||||
public void performLeaseAssignment(final Lease lease, final String newOwner) {
|
|
||||||
final String existingOwner = lease.actualOwner();
|
|
||||||
workerToLeasesMap.get(existingOwner).remove(lease);
|
|
||||||
workerToLeasesMap
|
|
||||||
.computeIfAbsent(newOwner, owner -> new HashSet<>())
|
|
||||||
.add(lease);
|
|
||||||
updateWorkerThroughput(newOwner, lease.throughputKBps());
|
|
||||||
// Remove the same lease throughput from oldOwner
|
|
||||||
updateWorkerThroughput(existingOwner, -lease.throughputKBps());
|
|
||||||
leaseToNewAssignedWorkerMap.put(lease, newOwner);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Scans the LeaseTable and WorkerMetricStats in parallel and load the data and populate datastructures used
|
|
||||||
* in lease assignment.
|
|
||||||
*/
|
|
||||||
public void loadInMemoryStorageView(final MetricsScope metricsScope) throws Exception {
|
|
||||||
final CompletableFuture<Map.Entry<List<Lease>, List<String>>> leaseListFuture = loadLeaseListAsync();
|
|
||||||
|
|
||||||
final CompletableFuture<List<WorkerMetricStats>> workerMetricsFuture = loadWorkerMetricStats();
|
|
||||||
|
|
||||||
final List<WorkerMetricStats> workerMetricsFromStorage = workerMetricsFuture.join();
|
|
||||||
|
|
||||||
final List<String> listOfWorkerIdOfInvalidWorkerMetricsEntry = workerMetricsFromStorage.stream()
|
|
||||||
.filter(workerMetrics -> !workerMetrics.isValidWorkerMetric())
|
|
||||||
.map(WorkerMetricStats::getWorkerId)
|
|
||||||
.collect(Collectors.toList());
|
|
||||||
if (!listOfWorkerIdOfInvalidWorkerMetricsEntry.isEmpty()) {
|
|
||||||
log.warn("List of workerIds with invalid entries : {}", listOfWorkerIdOfInvalidWorkerMetricsEntry);
|
|
||||||
metricsScope.addData(
|
|
||||||
"NumWorkersWithInvalidEntry",
|
|
||||||
listOfWorkerIdOfInvalidWorkerMetricsEntry.size(),
|
|
||||||
StandardUnit.COUNT,
|
|
||||||
MetricsLevel.SUMMARY);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid entries are considered further, for validity of entry refer WorkerMetricStats#isValidWorkerMetrics
|
|
||||||
this.workerMetricsList = workerMetricsFromStorage.stream()
|
|
||||||
.filter(WorkerMetricStats::isValidWorkerMetric)
|
|
||||||
.collect(Collectors.toList());
|
|
||||||
|
|
||||||
log.info("Total WorkerMetricStats available : {}", workerMetricsList.size());
|
|
||||||
final long workerExpiryThreshold = computeWorkerExpiryThresholdInSecond();
|
|
||||||
|
|
||||||
final long countOfWorkersWithFailingWorkerMetric = workerMetricsList.stream()
|
|
||||||
.filter(WorkerMetricStats::isAnyWorkerMetricFailing)
|
|
||||||
.count();
|
|
||||||
if (countOfWorkersWithFailingWorkerMetric != 0) {
|
|
||||||
metricsScope.addData(
|
|
||||||
"NumWorkersWithFailingWorkerMetric",
|
|
||||||
countOfWorkersWithFailingWorkerMetric,
|
|
||||||
StandardUnit.COUNT,
|
|
||||||
MetricsLevel.SUMMARY);
|
|
||||||
}
|
|
||||||
|
|
||||||
final Map.Entry<List<Lease>, List<String>> leaseListResponse = leaseListFuture.join();
|
|
||||||
this.leaseList = leaseListResponse.getKey();
|
|
||||||
if (!leaseListResponse.getValue().isEmpty()) {
|
|
||||||
log.warn("Leases that failed deserialization : {}", leaseListResponse.getValue());
|
|
||||||
MetricsUtil.addCount(
|
|
||||||
metricsScope,
|
|
||||||
"LeaseDeserializationFailureCount",
|
|
||||||
leaseListResponse.getValue().size(),
|
|
||||||
MetricsLevel.SUMMARY);
|
|
||||||
}
|
|
||||||
this.leaseTableScanTime = nanoTimeProvider.get();
|
|
||||||
log.info("Total Leases available : {}", leaseList.size());
|
|
||||||
|
|
||||||
final double averageLeaseThroughput = leaseList.stream()
|
|
||||||
.filter(lease -> nonNull(lease.throughputKBps()))
|
|
||||||
.mapToDouble(Lease::throughputKBps)
|
|
||||||
.average()
|
|
||||||
// If none of the leases has any value, that means its app
|
|
||||||
// startup time and thus assigns 0 in that case to start with.
|
|
||||||
.orElse(0D);
|
|
||||||
/*
|
|
||||||
* If a workerMetrics has a metric (i.e. has -1 value in last index which denotes failure),
|
|
||||||
* skip it from activeWorkerMetrics and no new action on it will be done
|
|
||||||
* (new assignment etc.) until the metric has non -1 value in last index. This is to avoid performing action
|
|
||||||
* with the stale data on worker.
|
|
||||||
*/
|
|
||||||
this.activeWorkerMetrics = workerMetricsList.stream()
|
|
||||||
.filter(workerMetrics -> workerMetrics.getLastUpdateTime() >= workerExpiryThreshold
|
|
||||||
&& !workerMetrics.isAnyWorkerMetricFailing())
|
|
||||||
.collect(Collectors.toList());
|
|
||||||
log.info("activeWorkerMetrics : {}", activeWorkerMetrics.size());
|
|
||||||
targetAverageThroughput =
|
|
||||||
averageLeaseThroughput * leaseList.size() / Math.max(1, activeWorkerMetrics.size());
|
|
||||||
leaseList.forEach(lease -> {
|
|
||||||
if (isNull(lease.throughputKBps())) {
|
|
||||||
// If the lease is unassigned, it will not have any throughput value, use average throughput
|
|
||||||
// as good enough value to start with.
|
|
||||||
lease.throughputKBps(averageLeaseThroughput);
|
|
||||||
}
|
|
||||||
workerToLeasesMap
|
|
||||||
.computeIfAbsent(lease.actualOwner(), workerId -> new HashSet<>())
|
|
||||||
.add(lease);
|
|
||||||
updateWorkerThroughput(lease.actualOwner(), lease.throughputKBps());
|
|
||||||
});
|
|
||||||
|
|
||||||
this.activeWorkerIdSet = new HashSet<>();
|
|
||||||
// Calculate initial ratio
|
|
||||||
this.activeWorkerMetrics.forEach(workerMetrics -> {
|
|
||||||
activeWorkerIdSet.add(workerMetrics.getWorkerId());
|
|
||||||
workerMetrics.setEmaAlpha(config.workerMetricsEMAAlpha());
|
|
||||||
if (workerMetrics.isUsingDefaultWorkerMetric()) {
|
|
||||||
setOperatingRangeAndWorkerMetricsDataForDefaultWorker(
|
|
||||||
workerMetrics,
|
|
||||||
getTotalAssignedThroughput(workerMetrics.getWorkerId()) / targetAverageThroughput);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
private void updateWorkerThroughput(final String workerId, final double leaseThroughput) {
|
|
||||||
double value = workerToTotalAssignedThroughputMap.computeIfAbsent(workerId, worker -> (double) 0L);
|
|
||||||
workerToTotalAssignedThroughputMap.put(workerId, value + leaseThroughput);
|
|
||||||
}
|
|
||||||
|
|
||||||
private void setOperatingRangeAndWorkerMetricsDataForDefaultWorker(
|
|
||||||
final WorkerMetricStats workerMetrics, final Double ratio) {
|
|
||||||
// for workers with default WorkerMetricStats, the operating range ceiling of 100 represents the
|
|
||||||
// target throughput. This way, with either heterogeneous or homogeneous fleets
|
|
||||||
// of explicit WorkerMetricStats and default WorkerMetricStats applications, load will be evenly
|
|
||||||
// distributed.
|
|
||||||
log.info(
|
|
||||||
"Worker [{}] is using default WorkerMetricStats, setting initial utilization ratio to [{}].",
|
|
||||||
workerMetrics.getWorkerId(),
|
|
||||||
ratio);
|
|
||||||
workerMetrics.setOperatingRange(ImmutableMap.of("T", ImmutableList.of(100L)));
|
|
||||||
workerMetrics.setMetricStats(ImmutableMap.of("T", ImmutableList.of(ratio * 100, ratio * 100)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Calculates the value threshold in seconds for a worker to be considered as active.
|
|
||||||
* If a worker has not updated the WorkerMetricStats entry within this threshold, the worker is not considered
|
|
||||||
* as active.
|
|
||||||
*
|
|
||||||
* @return wall time in seconds
|
|
||||||
*/
|
|
||||||
private long computeWorkerExpiryThresholdInSecond() {
|
|
||||||
final long timeInSeconds = Duration.ofMillis(System.currentTimeMillis()
|
|
||||||
- DEFAULT_NO_OF_SKIP_STAT_FOR_DEAD_WORKER_THRESHOLD
|
|
||||||
* config.workerMetricsReporterFreqInMillis())
|
|
||||||
.getSeconds();
|
|
||||||
log.info("WorkerMetricStats expiry time in seconds : {}", timeInSeconds);
|
|
||||||
return timeInSeconds;
|
|
||||||
}

        /**
         * Looks at the inMemoryWorkerToLeasesMapping lease assignment and determines whether the worker still has
         * throughput headroom, taking into account any new assignments that have happened in this run.
         */
        public boolean isWorkerTotalThroughputLessThanMaxThroughput(final String workerId) {
            return getTotalAssignedThroughput(workerId) <= config.maxThroughputPerHostKBps();
        }

        /**
         * Looks at the inMemoryWorkerToLeasesMapping lease assignment of a worker and returns true if the worker has
         * no leases assigned or fewer than maxNumberOfLeasesPerHost, else false.
         */
        public boolean isWorkerAssignedLeasesLessThanMaxLeases(final String workerId) {
            final Set<Lease> assignedLeases = workerToLeasesMap.get(workerId);
            if (CollectionUtils.isEmpty(assignedLeases)) {
                // There are no leases assigned to the worker, so it is below maxNumberOfLeasesPerHost.
                return true;
            } else {
                return assignedLeases.size() < maxLeasesForWorker;
            }
        }

        public Double getTotalAssignedThroughput(final String workerId) {
            return workerToTotalAssignedThroughputMap.getOrDefault(workerId, 0D);
        }

        private CompletableFuture<List<WorkerMetricStats>> loadWorkerMetricStats() {
            return CompletableFuture.supplyAsync(() -> loadWithRetry(workerMetricsDAO::getAllWorkerMetricStats));
        }

        private CompletableFuture<Map.Entry<List<Lease>, List<String>>> loadLeaseListAsync() {
            return CompletableFuture.supplyAsync(() ->
                    loadWithRetry(() -> leaseRefresher.listLeasesParallely(LEASE_ASSIGNMENT_CALL_THREAD_POOL, 0)));
        }

        private <T> T loadWithRetry(final Callable<T> loadFunction) {
            int retryAttempt = 0;
            while (true) {
                try {
                    return loadFunction.call();
                } catch (final Exception e) {
                    if (retryAttempt < DDB_LOAD_RETRY_ATTEMPT) {
                        log.warn(
                                "Failed to load : {}, retrying",
                                loadFunction.getClass().getName(),
                                e);
                        retryAttempt++;
                    } else {
                        throw new CompletionException(e);
                    }
                }
            }
        }
    }

    private long getCheckpointOwnerTimeoutTimestampMillis() {
        // This is a future timestamp in millis after which the graceful lease handoff shutdown can be considered
        // expired. leaseDurationMillis is used here to account for how long it might take for the
        // lease owner to receive the shutdown signal before executing the shutdown.
        return getNanoTimeMillis()
                + gracefulLeaseHandoffConfig.gracefulLeaseHandoffTimeoutMillis()
                + leaseDurationMillis;
    }
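
    // Worked example (illustrative values, not from this file): with gracefulLeaseHandoffTimeoutMillis = 30_000 and
    // leaseDurationMillis = 10_000, the checkpoint owner is given until roughly now + 40 seconds (on the monotonic
    // nanoTime-based clock below) before the handoff is considered expired.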

    private long getNanoTimeMillis() {
        // This is not wall-clock time, but as long as the same time provider is used consistently for elapsed-time
        // calculations it is safe to use in the checkpoint expiration calculation.
        return TimeUnit.NANOSECONDS.toMillis(nanoTimeProvider.get());
    }

    private static boolean isSameOwners(Lease currentLease, Lease previousLease) {
        return Objects.equals(currentLease.leaseOwner(), previousLease.leaseOwner())
                && Objects.equals(currentLease.checkpointOwner(), previousLease.checkpointOwner());
    }
}

@@ -1,363 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.coordinator.assignment;

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Set;
import java.util.stream.Collectors;

import lombok.extern.slf4j.Slf4j;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.leases.Lease;
import software.amazon.kinesis.worker.metricstats.WorkerMetricStats;

import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;

/**
 * VarianceBasedLeaseAssignmentDecider
 * This implementation of LeaseAssignmentDecider performs lease assignment by considering the WorkerMetricStats values
 * of workers with respect to the fleet-level average of each WorkerMetricStats.
 * Rebalanced leases are assigned to the workers that have the most remaining capacity, in terms of throughput, to
 * reach the fleet-level average of the WorkerMetricStats value. When multiple WorkerMetricStats are present, the
 * capacity to reach the fleet-level average is determined by the outlier WorkerMetricStats.
 * To minimize the variance, the algorithm picks the fleet-level average of the WorkerMetricStats across workers as a
 * pivot point and uses it to determine which workers to take leases from and which workers to assign them to.
 * The threshold for considering a worker for re-balance is configurable via {@code reBalanceThreshold}. During
 * reassignments the {@code dampeningPercentageValue} is used to achieve critical dampening.
 */
@Slf4j
@KinesisClientInternalApi
public final class VarianceBasedLeaseAssignmentDecider implements LeaseAssignmentDecider {
    private final LeaseAssignmentManager.InMemoryStorageView inMemoryStorageView;
    private final int dampeningPercentageValue;
    private final int reBalanceThreshold;
    private final boolean allowThroughputOvershoot;
    private final Map<String, Double> workerMetricsToFleetLevelAverageMap = new HashMap<>();
    private final PriorityQueue<WorkerMetricStats> assignableWorkerSortedByAvailableCapacity;
    private int targetLeasePerWorker;

    public VarianceBasedLeaseAssignmentDecider(
            final LeaseAssignmentManager.InMemoryStorageView inMemoryStorageView,
            final int dampeningPercentageValue,
            final int reBalanceThreshold,
            final boolean allowThroughputOvershoot) {
        this.inMemoryStorageView = inMemoryStorageView;
        this.dampeningPercentageValue = dampeningPercentageValue;
        this.reBalanceThreshold = reBalanceThreshold;
        this.allowThroughputOvershoot = allowThroughputOvershoot;
        initialize();
        final Comparator<WorkerMetricStats> comparator = Comparator.comparingDouble(
                workerMetrics -> workerMetrics.computePercentageToReachAverage(workerMetricsToFleetLevelAverageMap));
        this.assignableWorkerSortedByAvailableCapacity = new PriorityQueue<>(comparator.reversed());
        this.assignableWorkerSortedByAvailableCapacity.addAll(
                getAvailableWorkersForAssignment(inMemoryStorageView.getActiveWorkerMetrics()));
    }

    private void initialize() {
        final Map<String, Double> workerMetricsNameToAverage = inMemoryStorageView.getActiveWorkerMetrics().stream()
                .flatMap(workerMetrics -> workerMetrics.getMetricStats().keySet().stream()
                        .map(workerMetricsName ->
                                new SimpleEntry<>(workerMetricsName, workerMetrics.getMetricStat(workerMetricsName))))
                .collect(Collectors.groupingBy(
                        SimpleEntry::getKey, HashMap::new, Collectors.averagingDouble(SimpleEntry::getValue)));

        workerMetricsToFleetLevelAverageMap.putAll(workerMetricsNameToAverage);

        final int totalWorkers =
                Math.max(inMemoryStorageView.getActiveWorkerMetrics().size(), 1);
        this.targetLeasePerWorker = Math.max(inMemoryStorageView.getLeaseList().size() / totalWorkers, 1);
    }
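
    // Worked example (illustrative values, not from this file): if two active workers report a "CPU"
    // WorkerMetricStats of 80 and 40, workerMetricsToFleetLevelAverageMap ends up with {"CPU" -> 60.0}; with 12
    // leases across those 2 workers, targetLeasePerWorker = max(12 / 2, 1) = 6.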

    private List<WorkerMetricStats> getAvailableWorkersForAssignment(final List<WorkerMetricStats> workerMetricsList) {
        // Workers with WorkerMetricStats running hot are also available for assignment, as the goal is always to
        // balance utilization (e.g., if all workers have hot WorkerMetricStats, balance the variance between them too)
        return workerMetricsList.stream()
                .filter(workerMetrics -> inMemoryStorageView.isWorkerTotalThroughputLessThanMaxThroughput(
                                workerMetrics.getWorkerId())
                        && inMemoryStorageView.isWorkerAssignedLeasesLessThanMaxLeases(workerMetrics.getWorkerId()))
                .collect(Collectors.toList());
    }

    @Override
    public void assignExpiredOrUnassignedLeases(final List<Lease> expiredOrUnAssignedLeases) {
        // Sort the expiredOrUnAssignedLeases using lastCounterIncrementNanos such that the leases that expired first
        // are picked first.
        // Unassigned leases have lastCounterIncrementNanos as zero and are thus assigned first.
        Collections.sort(expiredOrUnAssignedLeases, Comparator.comparing(Lease::lastCounterIncrementNanos));
        final Set<Lease> assignedLeases = new HashSet<>();
        for (final Lease lease : expiredOrUnAssignedLeases) {
            final WorkerMetricStats workerToAssignLease = assignableWorkerSortedByAvailableCapacity.poll();
            if (nonNull(workerToAssignLease)) {
                assignLease(lease, workerToAssignLease);
                assignedLeases.add(lease);
            } else {
                log.info("No worker available to assign lease {}", lease.leaseKey());
                break;
            }
        }
        expiredOrUnAssignedLeases.removeAll(assignedLeases);
    }

    private List<WorkerMetricStats> getWorkersToTakeLeasesFromIfRequired(
            final List<WorkerMetricStats> currentWorkerMetrics,
            final String workerMetricsName,
            final double workerMetricsValueAvg) {
        final List<WorkerMetricStats> workerIdsAboveAverage = new ArrayList<>();

        final double upperLimit = workerMetricsValueAvg * (1.0D + (double) reBalanceThreshold / 100);
        final double lowerLimit = workerMetricsValueAvg * (1.0D - (double) reBalanceThreshold / 100);

        WorkerMetricStats mostLoadedWorker = null;

        log.info("Range for re-balance upper threshold {} and lower threshold {}", upperLimit, lowerLimit);

        boolean shouldTriggerReBalance = false;
        for (final WorkerMetricStats workerMetrics : currentWorkerMetrics) {
            final double currentWorkerMetricsValue = workerMetrics.getMetricStat(workerMetricsName);
            final boolean isCurrentWorkerMetricsAboveOperatingRange =
                    workerMetrics.isWorkerMetricAboveOperatingRange(workerMetricsName);
            /*
            If any worker's WorkerMetricStats value falls outside +/- reBalanceThreshold % of workerMetricsValueAvg,
            or if a worker's WorkerMetricStats value is above its operating range, trigger a re-balance.
            */
            if (currentWorkerMetricsValue > upperLimit
                    || currentWorkerMetricsValue < lowerLimit
                    || isCurrentWorkerMetricsAboveOperatingRange) {
                shouldTriggerReBalance = true;
            }
            // Perform re-balance on the worker if it is above upperLimit or if the current WorkerMetricStats is above
            // its operating range.
            if (currentWorkerMetricsValue >= upperLimit || isCurrentWorkerMetricsAboveOperatingRange) {
                workerIdsAboveAverage.add(workerMetrics);
            }
            if (mostLoadedWorker == null
                    || mostLoadedWorker.getMetricStat(workerMetricsName) < currentWorkerMetricsValue) {
                mostLoadedWorker = workerMetrics;
            }
        }

        /*
        If workerIdsAboveAverage is empty, there is no worker with a WorkerMetricStats value above upperLimit, so pick
        the worker with the highest WorkerMetricStats value. This can happen when there is a worker with a
        WorkerMetricStats value below lowerLimit but all other workers are within upperLimit.
        */
        if (workerIdsAboveAverage.isEmpty()) {
            workerIdsAboveAverage.add(mostLoadedWorker);
        }

        return shouldTriggerReBalance ? workerIdsAboveAverage : Collections.emptyList();
    }
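
    // Worked example (illustrative values, not from this file): with workerMetricsValueAvg = 50 and
    // reBalanceThreshold = 10, upperLimit = 55 and lowerLimit = 45. A worker reporting 70 both triggers a re-balance
    // and is added to workerIdsAboveAverage; a fleet whose workers all report between 45 and 55 (and none above its
    // operating range) returns an empty list.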

    /**
     * Performs the balancing of the throughput assigned to workers based on each worker's WorkerMetricStats values
     * with respect to the fleet-level average.
     * Each WorkerMetricStats is treated independently, and the workers to re-balance are determined per
     * WorkerMetricStats (computed based on reBalanceThreshold).
     * The magnitude of throughput to take is determined by how far the worker is from the average of that
     * WorkerMetricStats across the fleet, and in case of multiple WorkerMetricStats, the one with the maximum
     * magnitude of throughput is considered.
     */
    @Override
    public void balanceWorkerVariance() {
        final List<WorkerMetricStats> activeWorkerMetrics = inMemoryStorageView.getActiveWorkerMetrics();

        log.info("WorkerMetricStats to corresponding fleet level average : {}", workerMetricsToFleetLevelAverageMap);
        log.info("Active WorkerMetricStats : {}", activeWorkerMetrics);

        final Map<String, Double> workerIdToThroughputToTakeMap = new HashMap<>();
        String largestOutlierWorkerMetricsName = "";
        double maxThroughputTake = -1.0D;

        for (final Map.Entry<String, Double> workerMetricsToFleetLevelAverageEntry :
                workerMetricsToFleetLevelAverageMap.entrySet()) {
            final String workerMetricsName = workerMetricsToFleetLevelAverageEntry.getKey();

            // Filter out workers that do not have the current WorkerMetricStats. This is possible if the application
            // is adding a new WorkerMetricStats and a deployment is currently in progress.
            final List<WorkerMetricStats> currentWorkerMetrics = activeWorkerMetrics.stream()
                    .filter(workerMetrics -> workerMetrics.containsMetricStat(workerMetricsName))
                    .collect(Collectors.toList());

            final double fleetAverageForWorkerMetrics = workerMetricsToFleetLevelAverageEntry.getValue();

            final List<WorkerMetricStats> workerToTakeLeasesFrom = getWorkersToTakeLeasesFromIfRequired(
                    currentWorkerMetrics, workerMetricsName, fleetAverageForWorkerMetrics);

            final Map<String, Double> workerIdToThroughputToTakeForCurrentWorkerMetrics = new HashMap<>();
            double totalThroughputToTakeForCurrentWorkerMetrics = 0D;
            for (final WorkerMetricStats workerToTakeLease : workerToTakeLeasesFrom) {
                final double workerMetricsValueForWorker = workerToTakeLease.getMetricStat(workerMetricsName);
                // Load to take based on the difference compared to the fleet level average
                final double loadPercentageToTake =
                        (workerMetricsValueForWorker - fleetAverageForWorkerMetrics) / workerMetricsValueForWorker;
                // Dampen the load based on dampeningPercentageValue
                final double dampenedLoadPercentageToTake =
                        loadPercentageToTake * ((double) dampeningPercentageValue / 100);
                final double throughputToTake =
                        inMemoryStorageView.getTotalAssignedThroughput(workerToTakeLease.getWorkerId())
                                * dampenedLoadPercentageToTake;
                log.info(
                        "For worker : {} taking throughput : {} after dampening based on WorkerMetricStats : {}",
                        workerToTakeLease.getWorkerId(),
                        throughputToTake,
                        workerMetricsName);
                totalThroughputToTakeForCurrentWorkerMetrics += throughputToTake;
                workerIdToThroughputToTakeForCurrentWorkerMetrics.put(
                        workerToTakeLease.getWorkerId(), throughputToTake);
            }

            /*
            If totalThroughputToTakeForCurrentWorkerMetrics is more than maxThroughputTake, this WorkerMetricStats is
            a bigger outlier, so consider it for re-balancing.
            */
            if (maxThroughputTake < totalThroughputToTakeForCurrentWorkerMetrics) {
                largestOutlierWorkerMetricsName = workerMetricsName;
                workerIdToThroughputToTakeMap.clear();
                workerIdToThroughputToTakeMap.putAll(workerIdToThroughputToTakeForCurrentWorkerMetrics);
                maxThroughputTake = totalThroughputToTakeForCurrentWorkerMetrics;
            }
        }

        log.info(
                "Largest outlier WorkerMetricStats is : {} and total of {} throughput will be rebalanced",
                largestOutlierWorkerMetricsName,
                maxThroughputTake);
        log.info("Workers to throughput taken from them is : {}", workerIdToThroughputToTakeMap);

        final List<Map.Entry<String, Double>> sortedWorkerIdToThroughputToTakeEntries =
                new ArrayList<>(workerIdToThroughputToTakeMap.entrySet());
        // Sort entries by value in descending order.
        Collections.sort(sortedWorkerIdToThroughputToTakeEntries, (e1, e2) -> e2.getValue()
                .compareTo(e1.getValue()));

        for (final Map.Entry<String, Double> workerIdToThroughputToTakeEntry :
                sortedWorkerIdToThroughputToTakeEntries) {
            final String workerId = workerIdToThroughputToTakeEntry.getKey();

            final double throughputToTake = workerIdToThroughputToTakeEntry.getValue();

            final Queue<Lease> leasesToTake = getLeasesToTake(workerId, throughputToTake);

            log.info(
                    "Leases taken from worker : {} are : {}",
                    workerId,
                    leasesToTake.stream().map(Lease::leaseKey).collect(Collectors.toSet()));

            for (final Lease lease : leasesToTake) {
                final WorkerMetricStats workerToAssign = assignableWorkerSortedByAvailableCapacity.poll();
                if (nonNull(workerToAssign)
                        && workerToAssign.willAnyMetricStatsGoAboveAverageUtilizationOrOperatingRange(
                                workerMetricsToFleetLevelAverageMap,
                                inMemoryStorageView.getTargetAverageThroughput(),
                                lease.throughputKBps(),
                                targetLeasePerWorker)) {
                    log.info("No worker to assign anymore in this iteration due to hitting average values");
                    break;
                }
                if (nonNull(workerToAssign)) {
                    assignLease(lease, workerToAssign);
                }
            }
        }

        printWorkerToUtilizationLog(inMemoryStorageView.getActiveWorkerMetrics());
    }
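
    // Worked example (illustrative values, not from this file): for a worker reporting a WorkerMetricStats value of
    // 80 against a fleet average of 60, loadPercentageToTake = (80 - 60) / 80 = 0.25; with dampeningPercentageValue
    // = 60 that is dampened to 0.15, and if the worker currently has 200 KBps assigned, roughly 30 KBps worth of
    // leases are marked to be taken from it for the most-outlier WorkerMetricStats.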

    private Queue<Lease> getLeasesToTake(final String workerId, final double throughputToTake) {
        final Set<Lease> existingLeases =
                inMemoryStorageView.getWorkerToLeasesMap().get(workerId);

        if (isNull(existingLeases) || existingLeases.isEmpty()) {
            return new ArrayDeque<>();
        }

        if (inMemoryStorageView.getTotalAssignedThroughput(workerId) == 0D) {
            // This is the case where the throughput of this worker is zero but it has 1 or more leases assigned.
            // It's not possible to determine which leases to take based on throughput, so simply take 1 lease and
            // move on.
            return new ArrayDeque<>(new ArrayList<>(existingLeases).subList(0, 1));
        }

        return getLeasesCombiningToThroughput(workerId, throughputToTake);
    }

    private void assignLease(final Lease lease, final WorkerMetricStats workerMetrics) {
        if (nonNull(lease.actualOwner()) && lease.actualOwner().equals(workerMetrics.getWorkerId())) {
            // If the new owner and the current owner are the same, there is no assignment to do;
            // put the worker back in the queue since no assignment was done.
            assignableWorkerSortedByAvailableCapacity.add(workerMetrics);
            return;
        }
        workerMetrics.extrapolateMetricStatValuesForAddedThroughput(
                workerMetricsToFleetLevelAverageMap,
                inMemoryStorageView.getTargetAverageThroughput(),
                lease.throughputKBps(),
                targetLeasePerWorker);
        log.info("Assigning lease : {} to worker : {}", lease.leaseKey(), workerMetrics.getWorkerId());
        inMemoryStorageView.performLeaseAssignment(lease, workerMetrics.getWorkerId());
        if (inMemoryStorageView.isWorkerTotalThroughputLessThanMaxThroughput(workerMetrics.getWorkerId())
                && inMemoryStorageView.isWorkerAssignedLeasesLessThanMaxLeases(workerMetrics.getWorkerId())) {
            assignableWorkerSortedByAvailableCapacity.add(workerMetrics);
        }
    }

    private void printWorkerToUtilizationLog(final List<WorkerMetricStats> activeWorkerMetrics) {
        activeWorkerMetrics.forEach(workerMetrics -> log.info(
                "WorkerId : {} and average WorkerMetricStats data : {}",
                workerMetrics.getWorkerId(),
                workerMetrics.getMetricStatsMap()));
    }

    private Queue<Lease> getLeasesCombiningToThroughput(final String workerId, final double throughputToGet) {
        final List<Lease> assignedLeases =
                new ArrayList<>(inMemoryStorageView.getWorkerToLeasesMap().get(workerId));
        if (assignedLeases.isEmpty()) {
            // This is possible if the worker has high utilization but does not have any leases assigned to it.
            return new ArrayDeque<>();
        }
        // Shuffle leases to randomize which leases get picked.
        Collections.shuffle(assignedLeases);
        final Queue<Lease> response = new ArrayDeque<>();
        double remainingThroughputToGet = throughputToGet;
        for (final Lease lease : assignedLeases) {
            // If taking this lease would push the remaining throughput to take below zero, skip it.
            if (remainingThroughputToGet - lease.throughputKBps() <= 0) {
                continue;
            }
            remainingThroughputToGet -= lease.throughputKBps();
            response.add(lease);
        }

        // If allowThroughputOvershoot is set to true, take the lease with the minimum throughput.
        if (allowThroughputOvershoot && response.isEmpty()) {
            assignedLeases.stream()
                    .min(Comparator.comparingDouble(Lease::throughputKBps))
                    .ifPresent(response::add);
        }
        return response;
    }
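
    // Worked example (illustrative values, not from this file): asked to take ~30 KBps from a worker whose shuffled
    // leases carry 25, 10 and 20 KBps, the loop takes the 25 KBps lease (leaving ~5 KBps to get) and skips the rest,
    // since taking either remaining lease would push the remaining throughput to take below zero.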
}

@@ -1,58 +0,0 @@
/*
 * Copyright 2024 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package software.amazon.kinesis.coordinator.migration;

/**
 * ClientVersion support during upgrade from KCLv2.x to KCLv3.x
 *
 * This enum is persisted in storage, so any changes to it need to be backward compatible.
 * Reorganizing the values is not backward compatible; also, if versions are removed, the corresponding
 * enum value cannot be reused without backward-compatibility considerations.
 */
public enum ClientVersion {
    /**
     * This is a transient start state version used during initialization of the Migration State Machine.
     */
    CLIENT_VERSION_INIT,
    /**
     * This version is used during the upgrade of an application from KCLv2.x to KCLv3.x. In this version,
     * KCL workers will emit WorkerMetricStats and run KCLv2.x algorithms for leader election and lease
     * assignment. KCL will also monitor the worker fleet for readiness to upgrade to KCLv3.x.
     */
    CLIENT_VERSION_UPGRADE_FROM_2X,
    /**
     * This version is used during rollback from CLIENT_VERSION_UPGRADE_FROM_2X or CLIENT_VERSION_3X_WITH_ROLLBACK,
     * which can only be initiated using the KCL migration tool, when the customer wants to revert to KCLv2.x
     * functionality. In this version, KCL will not emit WorkerMetricStats and will run KCLv2.x algorithms for leader
     * election and lease assignment. In this version, KCL will also monitor for the roll-forward scenario where the
     * client version is updated to CLIENT_VERSION_UPGRADE_FROM_2X using the migration tool.
     */
    CLIENT_VERSION_2X,
    /**
     * When workers are operating in CLIENT_VERSION_UPGRADE_FROM_2X and the worker fleet is determined to be
     * KCLv3.x ready (when the lease table GSI is active and worker metrics are being emitted by all lease owners),
     * the leader will initiate the switch to KCLv3.x algorithms for leader election and lease assignment
     * by using this version and persisting it in the {@link MigrationState}, which allows all worker hosts
     * to also flip to KCLv3.x functionality. In this version, KCL will also monitor for rollback, to detect when the
     * customer updates the version to CLIENT_VERSION_2X using the migration tool, so that it instantly flips back
     * to CLIENT_VERSION_2X.
     */
    CLIENT_VERSION_3X_WITH_ROLLBACK,
    /**
     * A new application starting on KCLv3.x, or an application upgraded from KCLv2.x after the upgrade is successful,
     * can use this version to default to all KCLv3.x algorithms without any monitoring for rollback.
     */
    CLIENT_VERSION_3X;
}
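
// One possible progression for an application upgrading from KCLv2.x, as suggested by the Javadocs above (an
// illustrative sketch, not an exhaustive state machine): CLIENT_VERSION_INIT -> CLIENT_VERSION_UPGRADE_FROM_2X ->
// CLIENT_VERSION_3X_WITH_ROLLBACK -> CLIENT_VERSION_3X, with the migration tool able to roll back from
// CLIENT_VERSION_UPGRADE_FROM_2X or CLIENT_VERSION_3X_WITH_ROLLBACK to CLIENT_VERSION_2X, and roll forward from
// CLIENT_VERSION_2X to CLIENT_VERSION_UPGRADE_FROM_2X.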