diff --git a/.github/workflows/vmware-go-kcl-v2-ci.yml b/.github/workflows/vmware-go-kcl-v2-ci.yml
new file mode 100755
index 0000000..3c5a8d3
--- /dev/null
+++ b/.github/workflows/vmware-go-kcl-v2-ci.yml
@@ -0,0 +1,68 @@
+name: vmware-go-kcl-v2
+
+on:
+ push:
+ branches: [ master, upgrade/aws-sdk-to-v2 ]
+ paths-ignore: [ README.md ]
+ pull_request:
+ branches: [ master ]
+ paths-ignore: [ README.md ]
+
+jobs:
+ build:
+ name: Continuous Integration
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+
+ - name: Set up Go 1.17.x
+ uses: actions/setup-go@v2
+ with:
+ go-version: ^1.17
+ id: go
+
+ - name: Build
+ shell: bash
+ run: |
+ make build
+
+ # - name: Test
+ # shell: bash
+ # run: |
+ # make test
+
+ scans:
+ name: Checks, Lints and Scans
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+
+ - name: Set up Go 1.17.x
+ uses: actions/setup-go@v2
+ with:
+ go-version: ^1.17
+ id: go
+
+ - name: Format Check
+ shell: bash
+ run: |
+ make format-check
+
+ - name: Lint
+ shell: bash
+ run: |
+ make lint-docker
+
+ - name: Run Gosec Security Scanner
+ uses: securego/gosec@master
+ with:
+ # do not fail this step; let the uploaded report surface findings via the GitHub Security features.
+ args: '-no-fail -fmt sarif -out results.sarif -exclude-dir internal -exclude-dir vendor -severity high ./...'
+
+ - name: Upload SARIF file
+ uses: github/codeql-action/upload-sarif@v1
+ with:
+ # path to SARIF file relative to the root of the repository
+ sarif_file: results.sarif
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 5e7d273..85afb96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,25 @@
*
# Except this file
!.gitignore
+/src/gen
+/src/vendor
+!/src/vendor/manifest
+/bin
+/pkg
+/tmp
+/log
+/vms
+/run
+/go
+.hmake
+.hmakerc
+.project
+.idea
+.vscode
+*_mock_test.go
+filenames
+
+.DS_Store
+.scannerwork/
+*.sarif
\ No newline at end of file
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..ef3128e
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,5 @@
+[gerrit]
+host=review.ec.eng.vmware.com
+port=29418
+project=cascade-kinesis-client
+defaultbranch=develop
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f1bf47d..befab67 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -5,6 +5,8 @@ read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All cont
signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on
as an open-source patch.
+## Community
+
## Contribution Flow
This is a rough outline of what a contributor's workflow looks like:
@@ -31,7 +33,7 @@ When your branch gets out of sync with the vmware/main branch, use the following
``` shell
git checkout my-new-feature
git fetch -a
-git pull --rebase upstream main
+git pull --rebase upstream master
git push --force-with-lease origin my-new-feature
```
@@ -61,8 +63,6 @@ git push --force-with-lease origin my-new-feature
Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub does not generate a
notification when you git push.
-### Code Style
-
### Formatting Commit Messages
We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/).
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..3e782c5
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 VMware, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..ef02f13
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,63 @@
+.PHONY: help
+help: ## - Show this help message
+ @printf "\033[32m\xE2\x9c\x93 usage: make [target]\n\n\033[0m"
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
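+# `make help` lists each target followed by the text after its `##` marker, e.g.
+# `up                             - start docker compose` (illustrative output only).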
+
+.PHONY: up
+up: ## - start docker compose
+ @ cd _support/docker && docker-compose -f docker-compose.yml up
+
+.PHONY: build-common
+build-common: ## - execute common build tasks (clean, mod download/tidy, verify)
+ @ go version
+ @ go clean
+ @ go mod download && go mod tidy
+ @ go mod verify
+
+.PHONY: build
+build: build-common ## - build a debug binary for the current platform (windows, linux, or darwin (mac))
+ @ echo building
+ @ go build -v ./...
+ @ echo "done"
+
+.PHONY: format-check
+format-check: ## - check files format using gofmt
+ @ ./_support/scripts/ci.sh fmtcheck
+
+.PHONY: format
+format: ## - apply golang file format using gofmt
+ @ ./_support/scripts/ci.sh format
+
+.PHONY: test
+test: build-common ## - execute go test command for unit and mocked tests
+ @ ./_support/scripts/ci.sh unittest
+
+.PHONY: integration-test
+integration-test: ## - execute go test command for integration tests (aws credentials needed)
+ @ go test -v -cover -race ./test
+
+.PHONY: scan
+scan: ## - execute static code analysis
+ @ ./_support/scripts/ci.sh scan
+
+.PHONY: local-scan
+local-scan: ## - execute static code analysis locally
+ @ ./_support/scripts/ci.sh localScan
+
+.PHONY: lint
+lint: ## - runs golangci-lint
+ @ ./_support/scripts/ci.sh lint
+
+.PHONY: lint-docker
+lint-docker: ## - runs golangci-lint with docker container
+ @ ./_support/scripts/ci.sh lintDocker
+
+.PHONY: sonar-scan
+sonar-scan: ## - start sonar qube locally with docker (you will need docker installed on your machine)
+ @ # after it starts, set up a new project named sms-local with a new token sms-token, and pass that token via the -Dsonar.login= parameter.
+ @ # login with user: admin pwd: vmware
+ @ $(SHELL) _support/scripts/sonar-scan.sh
+
+.PHONY: sonar-stop
+sonar-stop: ## - stop sonar qube docker container
+ @ docker stop sonarqube
diff --git a/README.md b/README.md
index 9db664b..af388e4 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
# vmware-go-kcl-v2
+[![CI](https://github.com/fafg/vmware-go-kcl/actions/workflows/vmware-go-kcl-v2-ci.yml/badge.svg)](https://github.com/fafg/vmware-go-kcl/actions/workflows/vmware-go-kcl-v2-ci.yml)
+
## Overview
## Try it out
@@ -27,3 +29,35 @@ as an open-source patch. For more detailed information, refer to [CONTRIBUTING.m
## License
+### Build & Run
+
+```sh
+hmake
+
+# security scan
+hmake scanast
+
+# run test
+hmake check
+
+# run integration test
+# update the worker_test.go to let it point to your Kinesis stream
+hmake test
+```
+
+## Documentation
+
+VMware-Go-KCL matches the interface and programming model of the original Amazon KCL exactly, so the best reference and tutorial material is Amazon's own documentation (a small configuration sketch follows the links below):
+
+- [Developing Consumers Using the Kinesis Client Library](https://docs.aws.amazon.com/streams/latest/dev/developing-consumers-with-kcl.html)
+- [Troubleshooting](https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html)
+- [Advanced Topics](https://docs.aws.amazon.com/streams/latest/dev/advanced-consumers.html)
+
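+The sketch below is only an informal illustration of wiring up the configuration and DynamoDB checkpointer types added in this change. The constructor and builder methods shown are the ones exercised by the tests in this change; the stream name and worker ID are placeholders, AWS credentials are assumed to be available, and the worker/record-processor wiring is omitted.
+
+```go
+package main
+
+import (
+    "log"
+
+    chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+    cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+)
+
+func main() {
+    // application name, stream name, region, and worker ID (placeholder values)
+    kclConfig := cfg.NewKinesisClientLibConfig("appName", "my-stream", "us-west-2", "worker-1").
+        WithInitialPositionInStream(cfg.LATEST).
+        WithMaxRecords(10).
+        WithShardSyncIntervalMillis(5000).
+        WithFailoverTimeMillis(300000)
+
+    // the DynamoDB-backed checkpointer creates the lease table if it does not already exist
+    checkpointer := chk.NewDynamoCheckpoint(kclConfig)
+    if err := checkpointer.Init(); err != nil {
+        log.Fatalf("checkpointer init failed: %v", err)
+    }
+}
+```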
+
+## Contributing
+
+The vmware-go-kcl project team welcomes contributions from the community. Before you start working with vmware-go-kcl, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. For more detailed information, refer to [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## License
+
+MIT License
diff --git a/_support/scripts/ci.sh b/_support/scripts/ci.sh
new file mode 100755
index 0000000..245802f
--- /dev/null
+++ b/_support/scripts/ci.sh
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+
+function local_go_pkgs() {
+ find './clientlibrary' -name '*.go' | \
+ grep -Fv '/vendor/' | \
+ grep -Fv '/go/' | \
+ grep -Fv '/gen/' | \
+ grep -Fv '/tmp/' | \
+ grep -Fv '/run/' | \
+ grep -Fv '/tests/' | \
+ sed -r 's|(.+)/[^/]+\.go$|\1|g' | \
+ sort -u
+}
+
+function checkfmt() {
+ local files=""
+ files="$(find . -type f -iname "*.go" -exec gofmt -l {} \;)"
+
+ if [ -n "$files" ]; then
+ echo "You need to run \"gofmt -w ./\" to fix your formatting."
+ echo "$files" >&2
+ return 1
+ fi
+}
+
+function goFormat() {
+ echo "go formatting..."
+ gofmt -w ./
+ echo "done"
+}
+
+function lint() {
+ # golangci-lint run --enable-all -D forbidigo -D gochecknoglobals -D gofumpt -D gofmt -D nlreturn
+
+ golangci-lint run \
+ --skip-files=_mock.go \
+ --skip-dirs=test \
+ --skip-dirs=internal \
+ --timeout=600s \
+ --verbose
+}
+
+function lintDocker() {
+ lintVersion="1.41.1"
+ lintImage="golangci/golangci-lint:v$lintVersion-alpine"
+
+ docker run --rm -v "${PWD}":/app -w /app "$lintImage" golangci-lint run \
+ --skip-files=_mock.go \
+ --skip-dirs=test \
+ --skip-dirs=internal \
+ --timeout=600s \
+ --verbose
+}
+
+function unitTest() {
+ go list ./... | grep -v /test | \
+ xargs -L 1 -I% bash -c 'echo -e "\n**************** Package: % ****************" && go test % -v -cover -race ./...'
+}
+
+function scanast() {
+ gosec version
+ gosec ./... > security.log 2>&1
+
+ local issues=""
+ issues=$(grep 'Severity: MEDIUM' security.log | grep -v deaggregator | grep -c _)
+ if [ -n "$issues" ] && [ "$issues" -gt 0 ]; then
+ echo ""
+ echo "Medium Severity Issues:"
+ grep -e "Severity: MEDIUM" -A 1 security.log
+ echo "$issues" "medium severity issues found."
+ fi
+
+ local issues=""
+ local issues_count=""
+ issues="$(grep -E 'Severity: HIGH' security.log | grep -v vendor)"
+ issues_count="$(grep -E 'Severity: HIGH' security.log | grep -v vendor | grep -c _)"
+ if [ -n "$issues_count" ] && [ "$issues_count" -gt 0 ]; then
+ echo ""
+ echo "High Severity Issues:"
+ grep -E "Severity: HIGH" -A 1 security.log
+ echo "$issues_count" "high severity issues found."
+ echo "$issues"
+ echo "You need to resolve the high severity issues at the least."
+ exit 1
+ fi
+
+ local issues=""
+ local issues_count=""
+ issues="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src)"
+ issues_count="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src | grep -c _)"
+ if [ -n "$issues_count" ] && [ "$issues_count" -gt 0 ]; then
+ echo ""
+ echo "Unhandled errors:"
+ grep -E "Errors unhandled" security.log
+ echo "$issues_count" "unhandled errors, please indicate with the right comment that this case is ok, or handle the error."
+ echo "$issues"
+ echo "You need to resolve the all unhandled errors."
+ exit 1
+ fi
+
+ rm -f security.log
+}
+
+function scan() {
+ gosec -fmt=sarif -out=results.sarif -exclude-dir=internal -exclude-dir=vendor -severity=high ./...
+}
+
+function localScan() {
+ # you can use the VS Code plugin https://marketplace.visualstudio.com/items?itemName=MS-SarifVSCode.sarif-viewer
+ # to navigate the reported issues
+ gosec -fmt=sarif -out=results.sarif -exclude-dir=internal -exclude-dir=vendor ./...
+}
+
+function usage() {
+ echo "check.sh fmt|lint" >&2
+ exit 2
+}
+
+case "$1" in
+ fmtcheck) checkfmt ;;
+ format) goFormat ;;
+ lint) lint ;;
+ lintDocker) lintDocker ;;
+ unittest) unitTest ;;
+ scan) scan ;;
+ localScan) localScan ;;
+ *) usage ;;
+esac
diff --git a/_support/scripts/sonar-scan.sh b/_support/scripts/sonar-scan.sh
new file mode 100644
index 0000000..4fcb34a
--- /dev/null
+++ b/_support/scripts/sonar-scan.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+########################
+# requirements: #
+# 0. docker #
+# 1. wget #
+# 2. curl #
+# 3. jq #
+# 4. sonar-scanner #
+########################
+
+set -e
+
+projectKey="vmware-go-kcl-v2"
+user_tokenName="local_token"
+username="admin"
+user_password="admin"
+new_password="vmware"
+url="http://localhost"
+port="9000"
+
+if [[ "$( docker container inspect -f '{{.State.Running}}' sonarqube )" == "true" ]];
+then
+ docker ps
+else
+ docker run --rm -d --name sonarqube -e SONAR_ES_BOOTSTRAP_CHECKS_DISABLE=true -p 9000:9000 sonarqube
+fi
+
+echo "waiting for sonarqube starts..."
+wget -q -O - "$@" http://localhost:9000 | awk '/STARTING/{ print $0 }' | xargs
+
+STATUS="$(wget -q -O - "$@" http://localhost:9000 | awk '/UP/{ print $0 }')"
+while [ -z "$STATUS" ]
+do
+ sleep 2
+ STATUS="$(wget -q -O - "$@" http://localhost:9000 | awk '/UP/{ print $0 }')"
+ printf "."
+done
+
+printf '\n %s' "${STATUS}" | xargs
+echo ""
+
+# change the default password so you are not prompted to set a new one on the very first login
+curl -u ${username}:${user_password} -X POST "${url}:${port}/api/users/change_password?login=${username}&previousPassword=${user_password}&password=${new_password}"
+
+# search the specific user tokens for SonarQube
+hasToken=$(curl --silent -u ${username}:${new_password} -X GET "${url}:${port}/api/user_tokens/search")
+if [[ -n "${hasToken}" ]]; then
+ # Revoke the user token for SonarQube
+ curl -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "name=${user_tokenName}" -u ${username}:${new_password} "${url}:${port}"/api/user_tokens/revoke
+fi
+
+# generate new token
+token=$(curl --silent -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "name=${user_tokenName}" -u ${username}:${new_password} "${url}:${port}"/api/user_tokens/generate | jq '.token' | xargs)
+
+# scan and push the results to the local SonarQube container
+sonar-scanner -Dsonar.projectKey="${projectKey}" \
+ -Dsonar.projectName="${projectKey}" \
+ -Dsonar.sources=. \
+ -Dsonar.exclusions="internal/records/**, test/**" \
+ -Dsonar.host.url="${url}:${port}" \
+ -Dsonar.login="${token}"
+
diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go
new file mode 100644
index 0000000..fff1a51
--- /dev/null
+++ b/clientlibrary/checkpoint/checkpointer.go
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package checkpoint
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package checkpoint
+
+import (
+ "errors"
+ "fmt"
+
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+)
+
+const (
+ LeaseKeyKey = "ShardID"
+ LeaseOwnerKey = "AssignedTo"
+ LeaseTimeoutKey = "LeaseTimeout"
+ SequenceNumberKey = "Checkpoint"
+ ParentShardIdKey = "ParentShardId"
+ ClaimRequestKey = "ClaimRequest"
+
+ // ShardEnd We've completely processed all records in this shard.
+ ShardEnd = "SHARD_END"
+
+ // ErrShardClaimed is returned when shard is claimed
+ ErrShardClaimed = "shard is already claimed by another node"
+)
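+
+// For illustration only (the values below are hypothetical), a lease entry in the
+// DynamoDB table uses the attribute names defined above, e.g.:
+//
+//   ShardID:       "shardId-000000000001"
+//   AssignedTo:    "worker-1"
+//   LeaseTimeout:  "2021-01-02T15:04:05Z" (RFC3339)
+//   Checkpoint:    a Kinesis sequence number, or SHARD_END once the shard is fully processed
+//   ParentShardId: "shardId-000000000000"
+//   ClaimRequest:  the ID of a worker attempting to steal the lease, when lease stealing is enabled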
+
+type ErrLeaseNotAcquired struct {
+ cause string
+}
+
+func (e ErrLeaseNotAcquired) Error() string {
+ return fmt.Sprintf("lease not acquired: %s", e.cause)
+}
+
+// Checkpointer handles checkpointing when a record has been processed
+type Checkpointer interface {
+ // Init initialises the Checkpoint
+ Init() error
+
+ // GetLease attempts to gain a lock on the given shard
+ GetLease(*par.ShardStatus, string) error
+
+ // CheckpointSequence writes a checkpoint at the designated sequence ID
+ CheckpointSequence(*par.ShardStatus) error
+
+ // FetchCheckpoint retrieves the checkpoint for the given shard
+ FetchCheckpoint(*par.ShardStatus) error
+
+ // RemoveLeaseInfo to remove lease info for shard entry because the shard no longer exists
+ RemoveLeaseInfo(string) error
+
+ // RemoveLeaseOwner to remove lease owner for the shard entry to make the shard available for reassignment
+ RemoveLeaseOwner(string) error
+
+ // ListActiveWorkers returns active workers and their shards (New Lease Stealing Methods)
+ ListActiveWorkers(map[string]*par.ShardStatus) (map[string][]*par.ShardStatus, error)
+
+ // ClaimShard claims a shard for stealing
+ ClaimShard(*par.ShardStatus, string) error
+}
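+
+// Informal usage note (not an additional API): with the DynamoDB implementation in this
+// package, Init is called once at worker start-up, GetLease is invoked per shard until a
+// lease is acquired (or lost to another worker), CheckpointSequence persists progress as
+// records are processed, and RemoveLeaseOwner releases the shard on shutdown so it can be
+// reassigned. ListActiveWorkers and ClaimShard are used only when lease stealing is enabled.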
+
+// ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found
+var ErrSequenceIDNotFound = errors.New("SequenceIDNotFoundForShard")
+
+// ErrShardNotAssigned is returned by ListActiveWorkers when no AssignedTo is found
+var ErrShardNotAssigned = errors.New("AssignedToNotFoundForShard")
diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go
new file mode 100644
index 0000000..dc7e8cd
--- /dev/null
+++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package checkpoint
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package checkpoint
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+
+ "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+const (
+ // NumMaxRetries is the max times of doing retry
+ NumMaxRetries = 10
+)
+
+// DynamoCheckpoint implements the Checkpointer interface using DynamoDB as a backend
+type DynamoCheckpoint struct {
+ log logger.Logger
+ TableName string
+ leaseTableReadCapacity int64
+ leaseTableWriteCapacity int64
+
+ LeaseDuration int
+ svc *dynamodb.Client
+ kclConfig *config.KinesisClientLibConfiguration
+ Retries int
+ lastLeaseSync time.Time
+}
+
+func NewDynamoCheckpoint(kclConfig *config.KinesisClientLibConfiguration) *DynamoCheckpoint {
+ checkpointer := &DynamoCheckpoint{
+ log: kclConfig.Logger,
+ TableName: kclConfig.TableName,
+ leaseTableReadCapacity: int64(kclConfig.InitialLeaseTableReadCapacity),
+ leaseTableWriteCapacity: int64(kclConfig.InitialLeaseTableWriteCapacity),
+ LeaseDuration: kclConfig.FailoverTimeMillis,
+ kclConfig: kclConfig,
+ Retries: NumMaxRetries,
+ }
+
+ return checkpointer
+}
+
+// WithDynamoDB is used to provide DynamoDB service
+func (checkpointer *DynamoCheckpoint) WithDynamoDB(svc *dynamodb.Client) *DynamoCheckpoint {
+ checkpointer.svc = svc
+ return checkpointer
+}
+
+// Init initialises the DynamoDB Checkpoint
+func (checkpointer *DynamoCheckpoint) Init() error {
+ checkpointer.log.Infof("Creating DynamoDB session")
+
+ if checkpointer.svc == nil {
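+ // no DynamoDB client was injected via WithDynamoDB, so build one from the KCL
+ // configuration: static credentials, a fixed endpoint (useful for pointing at a
+ // local DynamoDB), and a standard retryer capped at the SDK's default maximum back-off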
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: checkpointer.kclConfig.DynamoDBEndpoint,
+ SigningRegion: checkpointer.kclConfig.RegionName,
+ }, nil
+ })
+
+ cfg, err := awsConfig.LoadDefaultConfig(
+ context.TODO(),
+ awsConfig.WithRegion(checkpointer.kclConfig.RegionName),
+ awsConfig.WithCredentialsProvider(
+ credentials.NewStaticCredentialsProvider(
+ checkpointer.kclConfig.DynamoDBCredentials.Value.AccessKeyID,
+ checkpointer.kclConfig.DynamoDBCredentials.Value.SecretAccessKey,
+ checkpointer.kclConfig.DynamoDBCredentials.Value.SessionToken)),
+ awsConfig.WithEndpointResolver(resolver),
+ awsConfig.WithRetryer(func() aws.Retryer {
+ return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff)
+ }),
+ )
+
+ if err != nil {
+ checkpointer.log.Fatalf("unable to load SDK config, %v", err)
+ }
+
+ checkpointer.svc = dynamodb.NewFromConfig(cfg)
+ }
+
+ if !checkpointer.doesTableExist() {
+ return checkpointer.createTable()
+ }
+
+ return nil
+}
+
+// GetLease attempts to gain a lock on the given shard
+func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssignTo string) error {
+ newLeaseTimeout := time.Now().Add(time.Duration(checkpointer.LeaseDuration) * time.Millisecond).UTC()
+ newLeaseTimeoutString := newLeaseTimeout.Format(time.RFC3339)
+ currentCheckpoint, err := checkpointer.getItem(shard.ID)
+ if err != nil {
+ return err
+ }
+
+ isClaimRequestExpired := shard.IsClaimRequestExpired(checkpointer.kclConfig)
+
+ var claimRequest string
+ if checkpointer.kclConfig.EnableLeaseStealing {
+ if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok &&
+ currentCheckpointClaimRequest.(*types.AttributeValueMemberS).Value != "" {
+ claimRequest = currentCheckpointClaimRequest.(*types.AttributeValueMemberS).Value
+ if newAssignTo != claimRequest && !isClaimRequestExpired {
+ checkpointer.log.Debugf("another worker: %s has a claim on this shard. Not going to renew the lease", claimRequest)
+ return errors.New(ErrShardClaimed)
+ }
+ }
+ }
+
+ assignedVar, assignedToOk := currentCheckpoint[LeaseOwnerKey]
+ leaseVar, leaseTimeoutOk := currentCheckpoint[LeaseTimeoutKey]
+
+ var conditionalExpression string
+ var expressionAttributeValues map[string]types.AttributeValue
+
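+ // If no owner or lease timeout is recorded, the shard is unclaimed and the lease is
+ // written guarded only by attribute_not_exists(AssignedTo). Otherwise the lease is
+ // taken only once the current one has expired (or it already belongs to this worker),
+ // and the conditional Put below protects against a concurrent acquisition.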
+ if !leaseTimeoutOk || !assignedToOk {
+ conditionalExpression = "attribute_not_exists(AssignedTo)"
+ } else {
+ assignedTo := assignedVar.(*types.AttributeValueMemberS).Value
+ leaseTimeout := leaseVar.(*types.AttributeValueMemberS).Value
+
+ currentLeaseTimeout, err := time.Parse(time.RFC3339, leaseTimeout)
+ if err != nil {
+ return err
+ }
+
+ if checkpointer.kclConfig.EnableLeaseStealing {
+ if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo && !isClaimRequestExpired {
+ return ErrLeaseNotAcquired{"current lease timeout not yet expired"}
+ }
+ } else {
+ if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo {
+ return ErrLeaseNotAcquired{"current lease timeout not yet expired"}
+ }
+ }
+
+ checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s, newAssignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo, newAssignTo)
+ conditionalExpression = "ShardID = :id AND AssignedTo = :assigned_to AND LeaseTimeout = :lease_timeout"
+ expressionAttributeValues = map[string]types.AttributeValue{
+ ":id": &types.AttributeValueMemberS{
+ Value: shard.ID,
+ },
+ ":assigned_to": &types.AttributeValueMemberS{
+ Value: assignedTo,
+ },
+ ":lease_timeout": &types.AttributeValueMemberS{
+ Value: leaseTimeout,
+ },
+ }
+ }
+
+ marshalledCheckpoint := map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: shard.ID,
+ },
+ LeaseOwnerKey: &types.AttributeValueMemberS{
+ Value: newAssignTo,
+ },
+ LeaseTimeoutKey: &types.AttributeValueMemberS{
+ Value: newLeaseTimeoutString,
+ },
+ }
+
+ if len(shard.ParentShardId) > 0 {
+ marshalledCheckpoint[ParentShardIdKey] = &types.AttributeValueMemberS{
+ Value: shard.ParentShardId,
+ }
+ }
+
+ if checkpoint := shard.GetCheckpoint(); checkpoint != "" {
+ marshalledCheckpoint[SequenceNumberKey] = &types.AttributeValueMemberS{
+ Value: checkpoint,
+ }
+ }
+
+ if checkpointer.kclConfig.EnableLeaseStealing {
+ if claimRequest != "" && claimRequest == newAssignTo && !isClaimRequestExpired {
+ if expressionAttributeValues == nil {
+ expressionAttributeValues = make(map[string]types.AttributeValue)
+ }
+ conditionalExpression = conditionalExpression + " AND ClaimRequest = :claim_request"
+ expressionAttributeValues[":claim_request"] = &types.AttributeValueMemberS{
+ Value: claimRequest,
+ }
+ }
+ }
+
+ err = checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint)
+ if err != nil {
+ var conditionalCheckErr *types.ConditionalCheckFailedException
+ if errors.As(err, &conditionalCheckErr) {
+ return ErrLeaseNotAcquired{conditionalCheckErr.ErrorMessage()}
+ }
+ return err
+ }
+
+ shard.Mux.Lock()
+ shard.AssignedTo = newAssignTo
+ shard.LeaseTimeout = newLeaseTimeout
+ shard.Mux.Unlock()
+
+ return nil
+}
+
+// CheckpointSequence writes a checkpoint at the designated sequence ID
+func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) error {
+ leaseTimeout := shard.GetLeaseTimeout().UTC().Format(time.RFC3339)
+ marshalledCheckpoint := map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: shard.ID,
+ },
+ SequenceNumberKey: &types.AttributeValueMemberS{
+ Value: shard.GetCheckpoint(),
+ },
+ LeaseOwnerKey: &types.AttributeValueMemberS{
+ Value: shard.GetLeaseOwner(),
+ },
+ LeaseTimeoutKey: &types.AttributeValueMemberS{
+ Value: leaseTimeout,
+ },
+ }
+
+ if len(shard.ParentShardId) > 0 {
+ marshalledCheckpoint[ParentShardIdKey] = &types.AttributeValueMemberS{Value: shard.ParentShardId}
+ }
+
+ return checkpointer.saveItem(marshalledCheckpoint)
+}
+
+// FetchCheckpoint retrieves the checkpoint for the given shard
+func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) error {
+ checkpoint, err := checkpointer.getItem(shard.ID)
+ if err != nil {
+ return err
+ }
+
+ sequenceID, ok := checkpoint[SequenceNumberKey]
+ if !ok {
+ return ErrSequenceIDNotFound
+ }
+
+ checkpointer.log.Debugf("Retrieved Shard Iterator %s", sequenceID.(*types.AttributeValueMemberS).Value)
+ shard.SetCheckpoint(sequenceID.(*types.AttributeValueMemberS).Value)
+
+ if assignedTo, ok := checkpoint[LeaseOwnerKey]; ok {
+ shard.SetLeaseOwner(assignedTo.(*types.AttributeValueMemberS).Value)
+ }
+
+ // Use up-to-date leaseTimeout to avoid ConditionalCheckFailedException when claiming
+ if leaseTimeout, ok := checkpoint[LeaseTimeoutKey]; ok && leaseTimeout.(*types.AttributeValueMemberS).Value != "" {
+ currentLeaseTimeout, err := time.Parse(time.RFC3339, leaseTimeout.(*types.AttributeValueMemberS).Value)
+ if err != nil {
+ return err
+ }
+ shard.LeaseTimeout = currentLeaseTimeout
+ }
+
+ return nil
+}
+
+// RemoveLeaseInfo to remove lease info for shard entry in dynamoDB because the shard no longer exists in Kinesis
+func (checkpointer *DynamoCheckpoint) RemoveLeaseInfo(shardID string) error {
+ err := checkpointer.removeItem(shardID)
+
+ if err != nil {
+ checkpointer.log.Errorf("Error in removing lease info for shard: %s, Error: %+v", shardID, err)
+ } else {
+ checkpointer.log.Infof("Lease info for shard: %s has been removed.", shardID)
+ }
+
+ return err
+}
+
+// RemoveLeaseOwner to remove lease owner for the shard entry
+func (checkpointer *DynamoCheckpoint) RemoveLeaseOwner(shardID string) error {
+ input := &dynamodb.UpdateItemInput{
+ TableName: aws.String(checkpointer.TableName),
+ Key: map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: shardID,
+ },
+ },
+ UpdateExpression: aws.String("remove " + LeaseOwnerKey),
+ ExpressionAttributeValues: map[string]types.AttributeValue{
+ ":assigned_to": &types.AttributeValueMemberS{
+ Value: checkpointer.kclConfig.WorkerID,
+ },
+ },
+ ConditionExpression: aws.String("AssignedTo = :assigned_to"),
+ }
+
+ _, err := checkpointer.svc.UpdateItem(context.TODO(), input)
+
+ return err
+}
+
+// ListActiveWorkers returns a map of workers and their shards
+func (checkpointer *DynamoCheckpoint) ListActiveWorkers(shardStatus map[string]*par.ShardStatus) (map[string][]*par.ShardStatus, error) {
+ err := checkpointer.syncLeases(shardStatus)
+ if err != nil {
+ return nil, err
+ }
+
+ workers := map[string][]*par.ShardStatus{}
+ for _, shard := range shardStatus {
+ if shard.GetCheckpoint() == ShardEnd {
+ continue
+ }
+
+ leaseOwner := shard.GetLeaseOwner()
+ if leaseOwner == "" {
+ checkpointer.log.Debugf("Shard Not Assigned Error. ShardID: %s, WorkerID: %s", shard.ID, checkpointer.kclConfig.WorkerID)
+ return nil, ErrShardNotAssigned
+ }
+
+ if w, ok := workers[leaseOwner]; ok {
+ workers[leaseOwner] = append(w, shard)
+ } else {
+ workers[leaseOwner] = []*par.ShardStatus{shard}
+ }
+ }
+ return workers, nil
+}
+
+// ClaimShard places a claim request on a shard to signal a steal attempt
+func (checkpointer *DynamoCheckpoint) ClaimShard(shard *par.ShardStatus, claimID string) error {
+ err := checkpointer.FetchCheckpoint(shard)
+ if err != nil && err != ErrSequenceIDNotFound {
+ return err
+ }
+ leaseTimeoutString := shard.GetLeaseTimeout().Format(time.RFC3339)
+
+ conditionalExpression := `ShardID = :id AND LeaseTimeout = :lease_timeout AND attribute_not_exists(ClaimRequest)`
+ expressionAttributeValues := map[string]types.AttributeValue{
+ ":id": &types.AttributeValueMemberS{
+ Value: shard.ID,
+ },
+ ":lease_timeout": &types.AttributeValueMemberS{
+ Value: leaseTimeoutString,
+ },
+ }
+
+ marshalledCheckpoint := map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: shard.ID,
+ },
+ LeaseTimeoutKey: &types.AttributeValueMemberS{
+ Value: leaseTimeoutString,
+ },
+ SequenceNumberKey: &types.AttributeValueMemberS{
+ Value: shard.Checkpoint,
+ },
+ ClaimRequestKey: &types.AttributeValueMemberS{
+ Value: claimID,
+ },
+ }
+
+ if leaseOwner := shard.GetLeaseOwner(); leaseOwner == "" {
+ conditionalExpression += " AND attribute_not_exists(AssignedTo)"
+ } else {
+ marshalledCheckpoint[LeaseOwnerKey] = &types.AttributeValueMemberS{Value: leaseOwner}
+ conditionalExpression += "AND AssignedTo = :assigned_to"
+ expressionAttributeValues[":assigned_to"] = &types.AttributeValueMemberS{Value: leaseOwner}
+ }
+
+ if checkpoint := shard.GetCheckpoint(); checkpoint == "" {
+ conditionalExpression += " AND attribute_not_exists(Checkpoint)"
+ } else if checkpoint == ShardEnd {
+ conditionalExpression += " AND Checkpoint <> :checkpoint"
+ expressionAttributeValues[":checkpoint"] = &types.AttributeValueMemberS{Value: ShardEnd}
+ } else {
+ conditionalExpression += " AND Checkpoint = :checkpoint"
+ expressionAttributeValues[":checkpoint"] = &types.AttributeValueMemberS{Value: checkpoint}
+ }
+
+ if shard.ParentShardId == "" {
+ conditionalExpression += " AND attribute_not_exists(ParentShardId)"
+ } else {
+ marshalledCheckpoint[ParentShardIdKey] = &types.AttributeValueMemberS{Value: shard.ParentShardId}
+ conditionalExpression += " AND ParentShardId = :parent_shard"
+ expressionAttributeValues[":parent_shard"] = &types.AttributeValueMemberS{Value: shard.ParentShardId}
+ }
+
+ return checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint)
+}
+
+func (checkpointer *DynamoCheckpoint) syncLeases(shardStatus map[string]*par.ShardStatus) error {
+ log := checkpointer.kclConfig.Logger
+
+ if (checkpointer.lastLeaseSync.Add(time.Duration(checkpointer.kclConfig.LeaseSyncingTimeIntervalMillis) * time.Millisecond)).After(time.Now()) {
+ return nil
+ }
+
+ checkpointer.lastLeaseSync = time.Now()
+ input := &dynamodb.ScanInput{
+ ProjectionExpression: aws.String(fmt.Sprintf("%s,%s,%s", LeaseKeyKey, LeaseOwnerKey, SequenceNumberKey)),
+ Select: types.SelectSpecificAttributes,
+ TableName: aws.String(checkpointer.kclConfig.TableName),
+ }
+
+ scanOutput, err := checkpointer.svc.Scan(context.TODO(), input)
+ // check the error before touching scanOutput, which is nil when the scan fails
+ if err != nil {
+ log.Debugf("Error performing SyncLeases. Error: %+v ", err)
+ return err
+ }
+
+ results := scanOutput.Items
+ for _, result := range results {
+ shardId, foundShardId := result[LeaseKeyKey]
+ assignedTo, foundAssignedTo := result[LeaseOwnerKey]
+ checkpoint, foundCheckpoint := result[SequenceNumberKey]
+ if !foundShardId || !foundAssignedTo || !foundCheckpoint {
+ continue
+ }
+
+ if shard, ok := shardStatus[shardId.(*types.AttributeValueMemberS).Value]; ok {
+ shard.SetLeaseOwner(assignedTo.(*types.AttributeValueMemberS).Value)
+ shard.SetCheckpoint(checkpoint.(*types.AttributeValueMemberS).Value)
+ }
+ }
+
+ log.Debugf("Lease sync completed. Next lease sync will occur in %s", time.Duration(checkpointer.kclConfig.LeaseSyncingTimeIntervalMillis)*time.Millisecond)
+ return nil
+}
+
+func (checkpointer *DynamoCheckpoint) createTable() error {
+ input := &dynamodb.CreateTableInput{
+ AttributeDefinitions: []types.AttributeDefinition{
+ {
+ AttributeName: aws.String(LeaseKeyKey),
+ AttributeType: types.ScalarAttributeTypeS,
+ },
+ },
+ KeySchema: []types.KeySchemaElement{
+ {
+ AttributeName: aws.String(LeaseKeyKey),
+ KeyType: types.KeyTypeHash,
+ },
+ },
+ ProvisionedThroughput: &types.ProvisionedThroughput{
+ ReadCapacityUnits: aws.Int64(checkpointer.leaseTableReadCapacity),
+ WriteCapacityUnits: aws.Int64(checkpointer.leaseTableWriteCapacity),
+ },
+ TableName: aws.String(checkpointer.TableName),
+ }
+ _, err := checkpointer.svc.CreateTable(context.Background(), input)
+
+ return err
+}
+
+func (checkpointer *DynamoCheckpoint) doesTableExist() bool {
+ input := &dynamodb.DescribeTableInput{
+ TableName: aws.String(checkpointer.TableName),
+ }
+ _, err := checkpointer.svc.DescribeTable(context.Background(), input)
+
+ return err == nil
+}
+
+func (checkpointer *DynamoCheckpoint) saveItem(item map[string]types.AttributeValue) error {
+ return checkpointer.putItem(&dynamodb.PutItemInput{
+ TableName: aws.String(checkpointer.TableName),
+ Item: item,
+ })
+}
+
+func (checkpointer *DynamoCheckpoint) conditionalUpdate(conditionExpression string, expressionAttributeValues map[string]types.AttributeValue, item map[string]types.AttributeValue) error {
+ return checkpointer.putItem(&dynamodb.PutItemInput{
+ ConditionExpression: aws.String(conditionExpression),
+ TableName: aws.String(checkpointer.TableName),
+ Item: item,
+ ExpressionAttributeValues: expressionAttributeValues,
+ })
+}
+
+func (checkpointer *DynamoCheckpoint) putItem(input *dynamodb.PutItemInput) error {
+ _, err := checkpointer.svc.PutItem(context.Background(), input)
+ return err
+}
+
+func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]types.AttributeValue, error) {
+ item, err := checkpointer.svc.GetItem(context.Background(), &dynamodb.GetItemInput{
+ TableName: aws.String(checkpointer.TableName),
+ ConsistentRead: aws.Bool(true),
+ Key: map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: shardID,
+ },
+ },
+ })
+
+ // guard against a nil response when starting from scratch (the DynamoDB table is empty)
+ if item == nil {
+ return nil, err
+ }
+
+ return item.Item, err
+}
+
+func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error {
+ _, err := checkpointer.svc.DeleteItem(context.Background(), &dynamodb.DeleteItemInput{
+ TableName: aws.String(checkpointer.TableName),
+ Key: map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: shardID,
+ },
+ },
+ })
+
+ return err
+}
diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go
new file mode 100644
index 0000000..1f0a8f1
--- /dev/null
+++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package checkpoint
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/stretchr/testify/assert"
+
+ cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+)
+
+func TestDoesTableExist(t *testing.T) {
+ svc := &mockDynamoDB{client: nil, tableExist: true, item: map[string]types.AttributeValue{}}
+ checkpoint := &DynamoCheckpoint{
+ TableName: "TableName",
+ svc: svc.client,
+ }
+ if !checkpoint.doesTableExist() {
+ t.Error("Table exists but returned false")
+ }
+
+ svc = &mockDynamoDB{tableExist: false}
+ checkpoint.svc = svc.client
+ if checkpoint.doesTableExist() {
+ t.Error("Table does not exist but returned true")
+ }
+}
+
+func TestGetLeaseNotAcquired(t *testing.T) {
+ svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}}
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+ err := checkpoint.GetLease(&par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ Mux: &sync.RWMutex{},
+ }, "abcd-efgh")
+ if err != nil {
+ t.Errorf("Error getting lease %s", err)
+ }
+
+ err = checkpoint.GetLease(&par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ Mux: &sync.RWMutex{},
+ }, "ijkl-mnop")
+
+ if err == nil || !errors.As(err, &ErrLeaseNotAcquired{}) {
+ t.Errorf("Got a lease when it was already held by abcd-efgh: %s", err)
+ }
+}
+
+func TestGetLeaseAcquired(t *testing.T) {
+ svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}}
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+ marshalledCheckpoint := map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: "0001",
+ },
+ LeaseOwnerKey: &types.AttributeValueMemberS{
+ Value: "abcd-efgh",
+ },
+ LeaseTimeoutKey: &types.AttributeValueMemberS{
+ Value: time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339),
+ },
+ SequenceNumberKey: &types.AttributeValueMemberS{
+ Value: "deadbeef",
+ },
+ }
+ input := &dynamodb.PutItemInput{
+ TableName: aws.String("TableName"),
+ Item: marshalledCheckpoint,
+ }
+ _, _ = checkpoint.svc.PutItem(context.TODO(), input)
+
+ shard := &par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "deadbeef",
+ Mux: &sync.RWMutex{},
+ }
+ err := checkpoint.GetLease(shard, "ijkl-mnop")
+
+ if err != nil {
+ t.Errorf("Lease not aquired after timeout %s", err)
+ }
+
+ id, ok := svc.item[SequenceNumberKey]
+ if !ok {
+ t.Error("Expected checkpoint to be set by GetLease")
+ } else if id.(*types.AttributeValueMemberS).Value != "deadbeef" {
+ t.Errorf("Expected checkpoint to be deadbeef. Got '%s'", id.(*types.AttributeValueMemberS).Value)
+ }
+
+ // release owner info
+ err = checkpoint.RemoveLeaseOwner(shard.ID)
+ assert.Nil(t, err)
+
+ status := &par.ShardStatus{
+ ID: shard.ID,
+ Mux: &sync.RWMutex{},
+ }
+ _ = checkpoint.FetchCheckpoint(status)
+
+ // checkpoint and parent shard id should be the same
+ assert.Equal(t, shard.Checkpoint, status.Checkpoint)
+ assert.Equal(t, shard.ParentShardId, status.ParentShardId)
+
+ // Only the lease owner has been wiped out
+ assert.Equal(t, "", status.GetLeaseOwner())
+}
+
+func TestGetLeaseShardClaimed(t *testing.T) {
+ leaseTimeout := time.Now().Add(-100 * time.Second).UTC()
+ svc := &mockDynamoDB{
+ tableExist: true,
+ item: map[string]types.AttributeValue{
+ ClaimRequestKey: &types.AttributeValueMemberS{Value: "ijkl-mnop"},
+ LeaseTimeoutKey: &types.AttributeValueMemberS{Value: leaseTimeout.Format(time.RFC3339)},
+ },
+ }
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLeaseStealing(true)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+ err := checkpoint.GetLease(&par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ LeaseTimeout: leaseTimeout,
+ Mux: &sync.RWMutex{},
+ }, "abcd-efgh")
+ if err == nil || err.Error() != ErrShardClaimed {
+ t.Errorf("Got a lease when it was already claimed by by ijkl-mnop: %s", err)
+ }
+
+ err = checkpoint.GetLease(&par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ LeaseTimeout: leaseTimeout,
+ Mux: &sync.RWMutex{},
+ }, "ijkl-mnop")
+ if err != nil {
+ t.Errorf("Error getting lease %s", err)
+ }
+}
+
+func TestGetLeaseClaimRequestExpiredOwner(t *testing.T) {
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLeaseStealing(true)
+
+ // Not expired
+ leaseTimeout := time.Now().
+ Add(-time.Duration(kclConfig.LeaseStealingClaimTimeoutMillis) * time.Millisecond).
+ Add(1 * time.Second).
+ UTC()
+
+ svc := &mockDynamoDB{
+ tableExist: true,
+ item: map[string]types.AttributeValue{
+ LeaseOwnerKey: &types.AttributeValueMemberS{Value: "abcd-efgh"},
+ ClaimRequestKey: &types.AttributeValueMemberS{Value: "ijkl-mnop"},
+ LeaseTimeoutKey: &types.AttributeValueMemberS{Value: leaseTimeout.Format(time.RFC3339)},
+ },
+ }
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+ err := checkpoint.GetLease(&par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ LeaseTimeout: leaseTimeout,
+ Mux: &sync.RWMutex{},
+ }, "abcd-efgh")
+ if err == nil || err.Error() != ErrShardClaimed {
+ t.Errorf("Got a lease when it was already claimed by ijkl-mnop: %s", err)
+ }
+}
+
+func TestGetLeaseClaimRequestExpiredClaimer(t *testing.T) {
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLeaseStealing(true)
+
+ // Not expired
+ leaseTimeout := time.Now().
+ Add(-time.Duration(kclConfig.LeaseStealingClaimTimeoutMillis) * time.Millisecond).
+ Add(121 * time.Second).
+ UTC()
+
+ svc := &mockDynamoDB{
+ tableExist: true,
+ item: map[string]types.AttributeValue{
+ LeaseOwnerKey: &types.AttributeValueMemberS{Value: "abcd-efgh"},
+ ClaimRequestKey: &types.AttributeValueMemberS{Value: "ijkl-mnop"},
+ LeaseTimeoutKey: &types.AttributeValueMemberS{Value: leaseTimeout.Format(time.RFC3339)},
+ },
+ }
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+ err := checkpoint.GetLease(&par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ LeaseTimeout: leaseTimeout,
+ Mux: &sync.RWMutex{},
+ }, "ijkl-mnop")
+ if err == nil || !errors.As(err, &ErrLeaseNotAcquired{}) {
+ t.Errorf("Got a lease when it was already claimed by ijkl-mnop: %s", err)
+ }
+}
+
+func TestFetchCheckpointWithStealing(t *testing.T) {
+ future := time.Now().AddDate(0, 1, 0)
+
+ svc := &mockDynamoDB{
+ tableExist: true,
+ item: map[string]types.AttributeValue{
+ SequenceNumberKey: &types.AttributeValueMemberS{Value: "deadbeef"},
+ LeaseOwnerKey: &types.AttributeValueMemberS{Value: "abcd-efgh"},
+ LeaseTimeoutKey: &types.AttributeValueMemberS{
+ Value: future.Format(time.RFC3339),
+ },
+ },
+ }
+
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLeaseStealing(true)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+
+ status := &par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "",
+ LeaseTimeout: time.Now(),
+ Mux: &sync.RWMutex{},
+ }
+
+ _ = checkpoint.FetchCheckpoint(status)
+
+ leaseTimeout, _ := time.Parse(time.RFC3339, svc.item[LeaseTimeoutKey].(*types.AttributeValueMemberS).Value)
+ assert.Equal(t, leaseTimeout, status.LeaseTimeout)
+}
+
+func TestGetLeaseConditional(t *testing.T) {
+ svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}}
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLeaseStealing(true)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+ marshalledCheckpoint := map[string]types.AttributeValue{
+ LeaseKeyKey: &types.AttributeValueMemberS{
+ Value: "0001",
+ },
+ LeaseOwnerKey: &types.AttributeValueMemberS{
+ Value: "abcd-efgh",
+ },
+ LeaseTimeoutKey: &types.AttributeValueMemberS{
+ Value: time.Now().Add(-1 * time.Second).UTC().Format(time.RFC3339),
+ },
+ SequenceNumberKey: &types.AttributeValueMemberS{
+ Value: "deadbeef",
+ },
+ ClaimRequestKey: &types.AttributeValueMemberS{
+ Value: "ijkl-mnop",
+ },
+ }
+ input := &dynamodb.PutItemInput{
+ TableName: aws.String("TableName"),
+ Item: marshalledCheckpoint,
+ }
+ _, _ = checkpoint.svc.PutItem(context.TODO(), input)
+
+ shard := &par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "deadbeef",
+ ClaimRequest: "ijkl-mnop",
+ Mux: &sync.RWMutex{},
+ }
+ err := checkpoint.FetchCheckpoint(shard)
+ if err != nil {
+ t.Errorf("Could not fetch checkpoint %s", err)
+ }
+
+ err = checkpoint.GetLease(shard, "ijkl-mnop")
+ if err != nil {
+ t.Errorf("Lease not aquired after timeout %s", err)
+ }
+ assert.Equal(t, svc.expressionAttributeValues[":claim_request"].(*types.AttributeValueMemberS).Value, "ijkl-mnop")
+ assert.Contains(t, svc.conditionalExpression, " AND ClaimRequest = :claim_request")
+}
+
+func TestListActiveWorkers(t *testing.T) {
+ svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}}
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithLeaseStealing(true)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ err := checkpoint.Init()
+ if err != nil {
+ t.Errorf("Checkpoint initialization failed: %+v", err)
+ }
+
+ shardStatus := map[string]*par.ShardStatus{
+ "0000": {ID: "0000", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0001": {ID: "0001", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0002": {ID: "0002", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0003": {ID: "0003", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0004": {ID: "0004", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0005": {ID: "0005", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0006": {ID: "0006", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0007": {ID: "0007", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0008": {ID: "0008", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0009": {ID: "0009", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}},
+ "0010": {ID: "0010", AssignedTo: "worker_0", Checkpoint: ShardEnd, Mux: &sync.RWMutex{}},
+ }
+
+ workers, err := checkpoint.ListActiveWorkers(shardStatus)
+ if err != nil {
+ t.Error(err)
+ }
+
+ for workerID, shards := range workers {
+ assert.Equal(t, 2, len(shards))
+ for _, shard := range shards {
+ assert.Equal(t, workerID, shard.AssignedTo)
+ }
+ }
+}
+
+func TestListActiveWorkersErrShardNotAssigned(t *testing.T) {
+ svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}}
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithLeaseStealing(true)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ err := checkpoint.Init()
+ if err != nil {
+ t.Errorf("Checkpoint initialization failed: %+v", err)
+ }
+
+ shardStatus := map[string]*par.ShardStatus{
+ "0000": {ID: "0000", Mux: &sync.RWMutex{}},
+ }
+
+ _, err = checkpoint.ListActiveWorkers(shardStatus)
+ if err != ErrShardNotAssigned {
+ t.Error("Expected ErrShardNotAssigned when shard is missing AssignedTo value")
+ }
+}
+
+func TestClaimShard(t *testing.T) {
+ svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}}
+ kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc").
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLeaseStealing(true)
+
+ checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client)
+ _ = checkpoint.Init()
+
+ marshalledCheckpoint := map[string]types.AttributeValue{
+ "ShardID": &types.AttributeValueMemberS{
+ Value: "0001",
+ },
+ "AssignedTo": &types.AttributeValueMemberS{
+ Value: "abcd-efgh",
+ },
+ "LeaseTimeout": &types.AttributeValueMemberS{
+ Value: time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339),
+ },
+ "Checkpoint": &types.AttributeValueMemberS{
+ Value: "deadbeef",
+ },
+ }
+ input := &dynamodb.PutItemInput{
+ TableName: aws.String("TableName"),
+ Item: marshalledCheckpoint,
+ }
+ _, _ = checkpoint.svc.PutItem(context.TODO(), input)
+
+ shard := &par.ShardStatus{
+ ID: "0001",
+ Checkpoint: "deadbeef",
+ Mux: &sync.RWMutex{},
+ }
+
+ err := checkpoint.ClaimShard(shard, "ijkl-mnop")
+ if err != nil {
+ t.Errorf("Shard not claimed %s", err)
+ }
+
+ claimRequest, ok := svc.item[ClaimRequestKey]
+ if !ok {
+ t.Error("Expected claimRequest to be set by ClaimShard")
+ } else if claimRequest.(*types.AttributeValueMemberS).Value != "ijkl-mnop" {
+ t.Errorf("Expected checkpoint to be ijkl-mnop. Got '%s'", claimRequest.(*types.AttributeValueMemberS).Value)
+ }
+
+ status := &par.ShardStatus{
+ ID: shard.ID,
+ Mux: &sync.RWMutex{},
+ }
+ _ = checkpoint.FetchCheckpoint(status)
+
+ // assignedTo, checkpoint, and parent shard id should be the same
+ assert.Equal(t, shard.AssignedTo, status.AssignedTo)
+ assert.Equal(t, shard.Checkpoint, status.Checkpoint)
+ assert.Equal(t, shard.ParentShardId, status.ParentShardId)
+}
+
+type mockDynamoDB struct {
+ client *dynamodb.Client
+ tableExist bool
+ item map[string]types.AttributeValue
+ conditionalExpression string
+ expressionAttributeValues map[string]types.AttributeValue
+}
+
+func (m *mockDynamoDB) ScanPages(_ *dynamodb.ScanInput, _ func(*dynamodb.ScanOutput, bool) bool) error {
+ return nil
+}
+
+func (m *mockDynamoDB) DescribeTable(_ *dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) {
+ if !m.tableExist {
+ return &dynamodb.DescribeTableOutput{}, &types.ResourceNotFoundException{Message: aws.String("doesNotExist")}
+ }
+
+ return &dynamodb.DescribeTableOutput{}, nil
+}
+
+func (m *mockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) {
+ item := input.Item
+
+ if shardID, ok := item[LeaseKeyKey]; ok {
+ m.item[LeaseKeyKey] = shardID
+ }
+
+ if owner, ok := item[LeaseOwnerKey]; ok {
+ m.item[LeaseOwnerKey] = owner
+ }
+
+ if timeout, ok := item[LeaseTimeoutKey]; ok {
+ m.item[LeaseTimeoutKey] = timeout
+ }
+
+ if checkpoint, ok := item[SequenceNumberKey]; ok {
+ m.item[SequenceNumberKey] = checkpoint
+ }
+
+ if parent, ok := item[ParentShardIdKey]; ok {
+ m.item[ParentShardIdKey] = parent
+ }
+
+ if claimRequest, ok := item[ClaimRequestKey]; ok {
+ m.item[ClaimRequestKey] = claimRequest
+ }
+
+ if input.ConditionExpression != nil {
+ m.conditionalExpression = *input.ConditionExpression
+ }
+
+ m.expressionAttributeValues = input.ExpressionAttributeValues
+
+ return nil, nil
+}
+
+func (m *mockDynamoDB) GetItem(_ *dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) {
+ return &dynamodb.GetItemOutput{
+ Item: m.item,
+ }, nil
+}
+
+func (m *mockDynamoDB) UpdateItem(input *dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) {
+ exp := input.UpdateExpression
+
+ if aws.ToString(exp) == "remove "+LeaseOwnerKey {
+ delete(m.item, LeaseOwnerKey)
+ }
+
+ return nil, nil
+}
+
+func (m *mockDynamoDB) CreateTable(_ *dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) {
+ return &dynamodb.CreateTableOutput{}, nil
+}
diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go
new file mode 100644
index 0000000..35cb6ad
--- /dev/null
+++ b/clientlibrary/config/config.go
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package config
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package config
+
+import (
+ "log"
+ "math"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+const (
+ // LATEST start after the most recent data record (fetch new data).
+ LATEST InitialPositionInStream = iota + 1
+ // TRIM_HORIZON start from the oldest available data record
+ TRIM_HORIZON
+ // AT_TIMESTAMP start from the record at or after the specified server-side Timestamp.
+ AT_TIMESTAMP
+
+ // DefaultInitialPositionInStream The location in the shard from which the KinesisClientLibrary will start fetching records
+ // when the application starts for the first time and there is no checkpoint for the shard.
+ DefaultInitialPositionInStream = LATEST
+
+ // DefaultFailoverTimeMillis Failover time in milliseconds. A worker that does not renew its lease within this time interval
+ // will be regarded as having problems and its shards will be assigned to other workers.
+ // For applications that have a large number of shards, this may be set to a higher number to reduce
+ // the number of DynamoDB IOPS required for tracking leases.
+ DefaultFailoverTimeMillis = 10000
+
+ // DefaultLeaseRefreshPeriodMillis Period before the end of lease during which a lease is refreshed by the owner.
+ DefaultLeaseRefreshPeriodMillis = 5000
+
+ // DefaultMaxRecords Max records to fetch from Kinesis in a single GetRecords call.
+ DefaultMaxRecords = 10000
+
+ // DefaultIdleTimeBetweenReadsMillis The default value for how long the {@link ShardConsumer}
+ // should sleep if no records are returned from the call to
+ DefaultIdleTimeBetweenReadsMillis = 1000
+
+ // DefaultDontCallProcessRecordsForEmptyRecordList Don't call processRecords() on the record processor for empty record lists.
+ DefaultDontCallProcessRecordsForEmptyRecordList = false
+
+ // DefaultParentShardPollIntervalMillis Interval in milliseconds between polling to check for parent shard completion.
+ // Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on
+ // completion of parent shards).
+ DefaultParentShardPollIntervalMillis = 10000
+
+ // DefaultShardSyncIntervalMillis Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks.
+ DefaultShardSyncIntervalMillis = 60000
+
+ // DefaultCleanupLeasesUponShardsCompletion Cleanup leases upon shards completion (don't wait until they expire in Kinesis).
+ // Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by
+ // default we try to delete the ones we don't need any longer.
+ DefaultCleanupLeasesUponShardsCompletion = true
+
+ // DefaultTaskBackoffTimeMillis Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures).
+ DefaultTaskBackoffTimeMillis = 500
+
+ // DefaultValidateSequenceNumberBeforeCheckpointing KCL will validate client provided sequence numbers with a call to Amazon Kinesis before
+ // checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} by default.
+ DefaultValidateSequenceNumberBeforeCheckpointing = true
+
+ // DefaultMaxLeasesForWorker The max number of leases (shards) this worker should process.
+ // This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints
+ // or during deployment.
+ // NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the
+ // stream due to the max limit.
+ DefaultMaxLeasesForWorker = math.MaxInt16
+
+ // DefaultMaxLeasesToStealAtOneTime Max leases to steal from another worker at one time (for load balancing).
+ // Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts),
+ // but can cause higher churn in the system.
+ DefaultMaxLeasesToStealAtOneTime = 1
+
+ // DefaultInitialLeaseTableReadCapacity The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity.
+ DefaultInitialLeaseTableReadCapacity = 10
+
+ // DefaultInitialLeaseTableWriteCapacity The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity.
+ DefaultInitialLeaseTableWriteCapacity = 10
+
+ // DefaultSkipShardSyncAtStartupIfLeasesExist The Worker will skip shard sync during initialization if there are one or more leases in the lease table. This
+ // assumes that the shards and leases are in-sync. This enables customers to choose faster startup times (e.g.
+ // during incremental deployments of an application).
+ DefaultSkipShardSyncAtStartupIfLeasesExist = false
+
+ // DefaultShutdownGraceMillis The amount of milliseconds to wait before graceful shutdown forcefully terminates.
+ DefaultShutdownGraceMillis = 5000
+
+ // DefaultEnableLeaseStealing Lease stealing defaults to false for backwards compatibility.
+ DefaultEnableLeaseStealing = false
+
+ // DefaultLeaseStealingIntervalMillis Interval between rebalance tasks defaults to 5 seconds.
+ DefaultLeaseStealingIntervalMillis = 5000
+
+ // DefaultLeaseStealingClaimTimeoutMillis Number of milliseconds to wait before another worker can acquire a claimed shard
+ DefaultLeaseStealingClaimTimeoutMillis = 120000
+
+ // DefaultLeaseSyncingIntervalMillis Number of milliseconds to wait before syncing with the lease table (DynamoDB)
+ DefaultLeaseSyncingIntervalMillis = 60000
+)
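+
+// Illustrative sketch (not part of the library code): a consuming application typically keeps
+// these defaults and overrides only a few values through the With... builders defined in
+// kcl-config.go. The application, stream and worker names below are placeholders.
+//
+//	kclConfig := config.NewKinesisClientLibConfig("myApp", "myStream", "us-west-2", "worker-1").
+//		WithInitialPositionInStream(config.TRIM_HORIZON).
+//		WithMaxRecords(100).
+//		WithFailoverTimeMillis(30000)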
+
+type (
+ // InitialPositionInStream Used to specify the Position in the stream where a new application should start from
+ // This is used during initial application bootstrap (when a checkpoint doesn't exist for a shard or its parents)
+ InitialPositionInStream int
+
+ // InitialPositionInStreamExtended Class that houses the entities needed to specify the Position in the stream from where a new application should
+ // start.
+ InitialPositionInStreamExtended struct {
+ Position InitialPositionInStream
+
+ // The time stamp of the data record from which to start reading. Used with
+ // shard iterator type AT_TIMESTAMP. A time stamp is the Unix epoch date with
+ // precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 or
+ // 1459799926.480. If a record with this exact time stamp does not exist, the
+ // iterator returned is for the next (later) record. If the time stamp is older
+ // than the current trim horizon, the iterator returned is for the oldest untrimmed
+ // data record (TRIM_HORIZON).
+ Timestamp *time.Time `type:"Timestamp" timestampFormat:"unix"`
+ }
+
+ // KinesisClientLibConfiguration Configuration for the Kinesis Client Library.
+ // Note: There is no need to configure a credential provider. Credentials can be obtained from the instance profile.
+ KinesisClientLibConfiguration struct {
+ // ApplicationName is the name of the application. Kinesis allows multiple applications to consume the same stream.
+ ApplicationName string
+
+ // DynamoDBEndpoint is an optional endpoint URL that overrides the default generated endpoint for a DynamoDB client.
+ // If this is empty, the default generated endpoint will be used.
+ DynamoDBEndpoint string
+
+ // KinesisEndpoint is an optional endpoint URL that overrides the default generated endpoint for a Kinesis client.
+ // If this is empty, the default generated endpoint will be used.
+ KinesisEndpoint string
+
+ // KinesisCredentials is used to access Kinesis
+ KinesisCredentials *credentials.StaticCredentialsProvider
+
+ // DynamoDBCredentials is used to access DynamoDB
+ DynamoDBCredentials *credentials.StaticCredentialsProvider
+
+ // TableName is the name of the DynamoDB table used for managing the Kinesis stream. It defaults to ApplicationName.
+ TableName string
+
+ // StreamName is the name of the Kinesis stream
+ StreamName string
+
+ // EnableEnhancedFanOutConsumer enables enhanced fan-out consumer
+ // See: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html
+ // Either consumer name or consumer ARN must be specified when Enhanced Fan-Out is enabled.
+ EnableEnhancedFanOutConsumer bool
+
+ // EnhancedFanOutConsumerName is the name of the enhanced fan-out consumer to create. If this isn't set the ApplicationName will be used.
+ EnhancedFanOutConsumerName string
+
+ // EnhancedFanOutConsumerARN is the ARN of an already created enhanced fan-out consumer, if this is set no automatic consumer creation will be attempted
+ EnhancedFanOutConsumerARN string
+
+ // WorkerID used to distinguish different workers/processes of a Kinesis application
+ WorkerID string
+
+ // InitialPositionInStream specifies the Position in the stream where a new application should start from
+ InitialPositionInStream InitialPositionInStream
+
+ // InitialPositionInStreamExtended provides actual AT_TIMESTAMP value
+ InitialPositionInStreamExtended InitialPositionInStreamExtended
+
+ // credentials to access Kinesis/Dynamo: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/
+ // Note: No need to configure here. Use NewEnvCredentials for testing and EC2RoleProvider for production
+
+ // FailoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others)
+ FailoverTimeMillis int
+
+ // LeaseRefreshPeriodMillis is the period before the end of lease during which a lease is refreshed by the owner.
+ LeaseRefreshPeriodMillis int
+
+ // MaxRecords Max records to read per Kinesis getRecords() call
+ MaxRecords int
+
+ // IdleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis
+ IdleTimeBetweenReadsInMillis int
+
+ // CallProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if
+ // GetRecords returned an empty record list.
+ CallProcessRecordsEvenForEmptyRecordList bool
+
+ // ParentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done
+ ParentShardPollIntervalMillis int
+
+ // ShardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards
+ ShardSyncIntervalMillis int
+
+ // CleanupTerminatedShardsBeforeExpiry Clean up shards we've finished processing (don't wait for expiration)
+ CleanupTerminatedShardsBeforeExpiry bool
+
+ // kinesisClientConfig Client Configuration used by Kinesis client
+ // dynamoDBClientConfig Client Configuration used by DynamoDB client
+ // Note: we will use default client provided by AWS SDK
+
+ // TaskBackoffTimeMillis Backoff period when tasks encounter an exception
+ TaskBackoffTimeMillis int
+
+ // ValidateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers
+ ValidateSequenceNumberBeforeCheckpointing bool
+
+ // RegionName The region name for the service
+ RegionName string
+
+ // ShutdownGraceMillis The number of milliseconds before graceful shutdown terminates forcefully
+ ShutdownGraceMillis int
+
+ // Operation parameters
+
+ // Max leases this Worker can handle at a time
+ MaxLeasesForWorker int
+
+ // Max leases to steal at one time (for load balancing)
+ MaxLeasesToStealAtOneTime int
+
+ // Read capacity to provision when creating the lease table (DynamoDB).
+ InitialLeaseTableReadCapacity int
+
+ // Write capacity to provision when creating the lease table.
+ InitialLeaseTableWriteCapacity int
+
+ // Worker should skip syncing shards and leases at startup if leases are present
+ // This is useful for optimizing deployments to large fleets working on a stable stream.
+ SkipShardSyncAtWorkerInitializationIfLeasesExist bool
+
+ // Logger used to log messages.
+ Logger logger.Logger
+
+ // MonitoringService publishes per worker-scoped metrics.
+ MonitoringService metrics.MonitoringService
+
+ // EnableLeaseStealing turns on lease stealing
+ EnableLeaseStealing bool
+
+ // LeaseStealingIntervalMillis The number of milliseconds between rebalance tasks
+ LeaseStealingIntervalMillis int
+
+ // LeaseStealingClaimTimeoutMillis The number of milliseconds to wait before another worker can acquire a claimed shard
+ LeaseStealingClaimTimeoutMillis int
+
+ // LeaseSyncingTimeIntervalMillis The number of milliseconds to wait between syncs with the lease table (DynamoDB)
+ LeaseSyncingTimeIntervalMillis int
+ }
+)
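+
+// Illustrative sketch (not part of the library code): AT_TIMESTAMP is normally configured through
+// the WithTimestampAtInitialPositionInStream builder rather than by populating
+// InitialPositionInStreamExtended directly. The timestamp and names below are placeholders.
+//
+//	ts := time.Now().Add(-1 * time.Hour)
+//	kclConfig := config.NewKinesisClientLibConfig("myApp", "myStream", "us-west-2", "worker-1").
+//		WithTimestampAtInitialPositionInStream(&ts)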
+
+var positionMap = map[InitialPositionInStream]*string{
+ LATEST: aws.String("LATEST"),
+ TRIM_HORIZON: aws.String("TRIM_HORIZON"),
+ AT_TIMESTAMP: aws.String("AT_TIMESTAMP"),
+}
+
+func InitalPositionInStreamToShardIteratorType(pos InitialPositionInStream) *string {
+ return positionMap[pos]
+}
+
+func empty(s string) bool {
+ return len(strings.TrimSpace(s)) == 0
+}
+
+// checkIsValueNotEmpty makes sure the value is not empty.
+func checkIsValueNotEmpty(key string, value string) {
+ if empty(value) {
+ // There is no point in continuing with an incorrect configuration. Fail fast!
+ log.Panicf("Non-empty value expected for %v, actual: %v", key, value)
+ }
+}
+
+// checkIsValuePositive makes sure the value is positive.
+func checkIsValuePositive(key string, value int) {
+ if value <= 0 {
+ // There is no point in continuing with an incorrect configuration. Fail fast!
+ log.Panicf("Positive value expected for %v, actual: %v", key, value)
+ }
+}
diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go
new file mode 100644
index 0000000..1785e91
--- /dev/null
+++ b/clientlibrary/config/config_test.go
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package config
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+func TestConfig(t *testing.T) {
+ kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "us-west-2", "workerId").
+ WithFailoverTimeMillis(500).
+ WithMaxRecords(100).
+ WithInitialPositionInStream(TRIM_HORIZON).
+ WithIdleTimeBetweenReadsInMillis(20).
+ WithCallProcessRecordsEvenForEmptyRecordList(true).
+ WithTaskBackoffTimeMillis(10).
+ WithEnhancedFanOutConsumerName("fan-out-consumer")
+
+ assert.Equal(t, "appName", kclConfig.ApplicationName)
+ assert.Equal(t, 500, kclConfig.FailoverTimeMillis)
+ assert.Equal(t, 10, kclConfig.TaskBackoffTimeMillis)
+
+ assert.True(t, kclConfig.EnableEnhancedFanOutConsumer)
+ assert.Equal(t, "fan-out-consumer", kclConfig.EnhancedFanOutConsumerName)
+
+ assert.Equal(t, false, kclConfig.EnableLeaseStealing)
+ assert.Equal(t, 5000, kclConfig.LeaseStealingIntervalMillis)
+
+ contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with default logger")
+ contextLogger.Infof("Default logger is awesome")
+}
+
+func TestConfigLeaseStealing(t *testing.T) {
+ kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "us-west-2", "workerId").
+ WithFailoverTimeMillis(500).
+ WithMaxRecords(100).
+ WithInitialPositionInStream(TRIM_HORIZON).
+ WithIdleTimeBetweenReadsInMillis(20).
+ WithCallProcessRecordsEvenForEmptyRecordList(true).
+ WithTaskBackoffTimeMillis(10).
+ WithLeaseStealing(true).
+ WithLeaseStealingIntervalMillis(10000)
+
+ assert.Equal(t, "appName", kclConfig.ApplicationName)
+ assert.Equal(t, 500, kclConfig.FailoverTimeMillis)
+ assert.Equal(t, 10, kclConfig.TaskBackoffTimeMillis)
+ assert.Equal(t, true, kclConfig.EnableLeaseStealing)
+ assert.Equal(t, 10000, kclConfig.LeaseStealingIntervalMillis)
+
+ contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with default logger")
+ contextLogger.Infof("Default logger is awesome")
+}
+
+func TestConfigDefaultEnhancedFanOutConsumerName(t *testing.T) {
+ kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "us-west-2", "workerId")
+
+ assert.Equal(t, "appName", kclConfig.ApplicationName)
+ assert.False(t, kclConfig.EnableEnhancedFanOutConsumer)
+ assert.Equal(t, "appName", kclConfig.EnhancedFanOutConsumerName)
+}
+
+func TestEmptyEnhancedFanOutConsumerName(t *testing.T) {
+ assert.PanicsWithValue(t, "Non-empty value expected for EnhancedFanOutConsumerName, actual: ", func() {
+ NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").WithEnhancedFanOutConsumerName("")
+ })
+}
+
+func TestConfigWithEnhancedFanOutConsumerARN(t *testing.T) {
+ kclConfig := NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").
+ WithEnhancedFanOutConsumerARN("consumer:arn")
+
+ assert.True(t, kclConfig.EnableEnhancedFanOutConsumer)
+ assert.Equal(t, "consumer:arn", kclConfig.EnhancedFanOutConsumerARN)
+}
+
+func TestEmptyEnhancedFanOutConsumerARN(t *testing.T) {
+ assert.PanicsWithValue(t, "Non-empty value expected for EnhancedFanOutConsumerARN, actual: ", func() {
+ NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").WithEnhancedFanOutConsumerARN("")
+ })
+}
diff --git a/clientlibrary/config/initial-stream-pos.go b/clientlibrary/config/initial-stream-pos.go
new file mode 100644
index 0000000..1cb0abd
--- /dev/null
+++ b/clientlibrary/config/initial-stream-pos.go
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package config
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package config
+
+import (
+ "time"
+)
+
+func newInitialPositionAtTimestamp(timestamp *time.Time) *InitialPositionInStreamExtended {
+ return &InitialPositionInStreamExtended{Position: AT_TIMESTAMP, Timestamp: timestamp}
+}
+
+func newInitialPosition(position InitialPositionInStream) *InitialPositionInStreamExtended {
+ return &InitialPositionInStreamExtended{Position: position, Timestamp: nil}
+}
diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go
new file mode 100644
index 0000000..ae4935c
--- /dev/null
+++ b/clientlibrary/config/kcl-config.go
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package config
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package config
+
+import (
+ "log"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/credentials"
+
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/utils"
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+// NewKinesisClientLibConfig creates a default KinesisClientLibConfiguration based on the required fields.
+func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID string) *KinesisClientLibConfiguration {
+ return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID,
+ nil, nil)
+}
+
+// NewKinesisClientLibConfigWithCredential creates a default KinesisClientLibConfiguration based on the required fields and a single credentials provider shared by Kinesis and DynamoDB.
+func NewKinesisClientLibConfigWithCredential(applicationName, streamName, regionName, workerID string,
+ creds *credentials.StaticCredentialsProvider) *KinesisClientLibConfiguration {
+ return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds)
+}
+
+// NewKinesisClientLibConfigWithCredentials creates a default KinesisClientLibConfiguration based on the required fields and specific credentials for each service.
+func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID string,
+ kinesisCreds, dynamodbCreds *credentials.StaticCredentialsProvider) *KinesisClientLibConfiguration {
+ checkIsValueNotEmpty("ApplicationName", applicationName)
+ checkIsValueNotEmpty("StreamName", streamName)
+ checkIsValueNotEmpty("RegionName", regionName)
+
+ if empty(workerID) {
+ workerID = utils.MustNewUUID()
+ }
+
+ // populate the KCL configuration with default values
+ return &KinesisClientLibConfiguration{
+ ApplicationName: applicationName,
+ KinesisCredentials: kinesisCreds,
+ DynamoDBCredentials: dynamodbCreds,
+ TableName: applicationName,
+ EnhancedFanOutConsumerName: applicationName,
+ StreamName: streamName,
+ RegionName: regionName,
+ WorkerID: workerID,
+ InitialPositionInStream: DefaultInitialPositionInStream,
+ InitialPositionInStreamExtended: *newInitialPosition(DefaultInitialPositionInStream),
+ FailoverTimeMillis: DefaultFailoverTimeMillis,
+ LeaseRefreshPeriodMillis: DefaultLeaseRefreshPeriodMillis,
+ MaxRecords: DefaultMaxRecords,
+ IdleTimeBetweenReadsInMillis: DefaultIdleTimeBetweenReadsMillis,
+ CallProcessRecordsEvenForEmptyRecordList: DefaultDontCallProcessRecordsForEmptyRecordList,
+ ParentShardPollIntervalMillis: DefaultParentShardPollIntervalMillis,
+ ShardSyncIntervalMillis: DefaultShardSyncIntervalMillis,
+ CleanupTerminatedShardsBeforeExpiry: DefaultCleanupLeasesUponShardsCompletion,
+ TaskBackoffTimeMillis: DefaultTaskBackoffTimeMillis,
+ ValidateSequenceNumberBeforeCheckpointing: DefaultValidateSequenceNumberBeforeCheckpointing,
+ ShutdownGraceMillis: DefaultShutdownGraceMillis,
+ MaxLeasesForWorker: DefaultMaxLeasesForWorker,
+ MaxLeasesToStealAtOneTime: DefaultMaxLeasesToStealAtOneTime,
+ InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity,
+ InitialLeaseTableWriteCapacity: DefaultInitialLeaseTableWriteCapacity,
+ SkipShardSyncAtWorkerInitializationIfLeasesExist: DefaultSkipShardSyncAtStartupIfLeasesExist,
+ EnableLeaseStealing: DefaultEnableLeaseStealing,
+ LeaseStealingIntervalMillis: DefaultLeaseStealingIntervalMillis,
+ LeaseStealingClaimTimeoutMillis: DefaultLeaseStealingClaimTimeoutMillis,
+ LeaseSyncingTimeIntervalMillis: DefaultLeaseSyncingIntervalMillis,
+ Logger: logger.GetDefaultLogger(),
+ }
+}
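+
+// Illustrative sketch (not part of the library code): constructing a configuration with explicit
+// static credentials from the AWS SDK v2 credentials package. The key, secret and names below are
+// placeholders.
+//
+//	creds := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
+//	kclConfig := NewKinesisClientLibConfigWithCredential("myApp", "myStream", "us-west-2", "worker-1", &creds)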
+
+// WithKinesisEndpoint is used to provide an alternative Kinesis endpoint
+func (c *KinesisClientLibConfiguration) WithKinesisEndpoint(kinesisEndpoint string) *KinesisClientLibConfiguration {
+ c.KinesisEndpoint = kinesisEndpoint
+ return c
+}
+
+// WithDynamoDBEndpoint is used to provide an alternative DynamoDB endpoint
+func (c *KinesisClientLibConfiguration) WithDynamoDBEndpoint(dynamoDBEndpoint string) *KinesisClientLibConfiguration {
+ c.DynamoDBEndpoint = dynamoDBEndpoint
+ return c
+}
+
+// WithTableName to provide alternative lease table in DynamoDB
+func (c *KinesisClientLibConfiguration) WithTableName(tableName string) *KinesisClientLibConfiguration {
+ c.TableName = tableName
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithInitialPositionInStream(initialPositionInStream InitialPositionInStream) *KinesisClientLibConfiguration {
+ c.InitialPositionInStream = initialPositionInStream
+ c.InitialPositionInStreamExtended = *newInitialPosition(initialPositionInStream)
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithTimestampAtInitialPositionInStream(timestamp *time.Time) *KinesisClientLibConfiguration {
+ c.InitialPositionInStream = AT_TIMESTAMP
+ c.InitialPositionInStreamExtended = *newInitialPositionAtTimestamp(timestamp)
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithFailoverTimeMillis(failoverTimeMillis int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis)
+ c.FailoverTimeMillis = failoverTimeMillis
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithLeaseRefreshPeriodMillis(leaseRefreshPeriodMillis int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("LeaseRefreshPeriodMillis", leaseRefreshPeriodMillis)
+ c.LeaseRefreshPeriodMillis = leaseRefreshPeriodMillis
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithShardSyncIntervalMillis(shardSyncIntervalMillis int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis)
+ c.ShardSyncIntervalMillis = shardSyncIntervalMillis
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithMaxRecords(maxRecords int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("MaxRecords", maxRecords)
+ c.MaxRecords = maxRecords
+ return c
+}
+
+// WithMaxLeasesForWorker configures the maximum number of leases this worker can handle. It determines the maximum
+// number of shards this worker can process at a time.
+func (c *KinesisClientLibConfiguration) WithMaxLeasesForWorker(n int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("MaxLeasesForWorker", n)
+ c.MaxLeasesForWorker = n
+ return c
+}
+
+// WithIdleTimeBetweenReadsInMillis
+// Controls how long the KCL will sleep if no records are returned from Kinesis
+//
+//
+// This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will
+// immediately retrieve the next set of records after the call to
+// {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)}
+// has returned. Setting this value too high may result in the KCL being unable to catch up. If you are changing this
+// value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and
+// monitor how far behind the records retrieved are by inspecting
+// {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the
+// CloudWatch
+// Metric: GetRecords.MillisBehindLatest
+//
+//
+// @param IdleTimeBetweenReadsInMillis: how long to sleep between GetRecords calls when no records are returned.
+// @return KinesisClientLibConfiguration
+func (c *KinesisClientLibConfiguration) WithIdleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis)
+ c.IdleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis
+ return c
+}
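+
+// Illustrative sketch (not part of the library code): as recommended above, a longer idle time is
+// usually paired with processing empty record lists so the application can still observe
+// MillisBehindLatest. The values below are placeholders.
+//
+//	kclConfig := NewKinesisClientLibConfig("myApp", "myStream", "us-west-2", "worker-1").
+//		WithIdleTimeBetweenReadsInMillis(2000).
+//		WithCallProcessRecordsEvenForEmptyRecordList(true)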
+
+func (c *KinesisClientLibConfiguration) WithCallProcessRecordsEvenForEmptyRecordList(callProcessRecordsEvenForEmptyRecordList bool) *KinesisClientLibConfiguration {
+ c.CallProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithTaskBackoffTimeMillis(taskBackoffTimeMillis int) *KinesisClientLibConfiguration {
+ checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis)
+ c.TaskBackoffTimeMillis = taskBackoffTimeMillis
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithLogger(logger logger.Logger) *KinesisClientLibConfiguration {
+ if logger == nil {
+ log.Panic("Logger cannot be null")
+ }
+ c.Logger = logger
+ return c
+}
+
+// WithMonitoringService sets the monitoring service to use to publish metrics.
+func (c *KinesisClientLibConfiguration) WithMonitoringService(mService metrics.MonitoringService) *KinesisClientLibConfiguration {
+ // Nil case is handled downward (at worker creation) so no need to do it here.
+ // Plus the user might want to be explicit about passing a nil monitoring service here.
+ c.MonitoringService = mService
+ return c
+}
+
+// WithEnhancedFanOutConsumer sets EnableEnhancedFanOutConsumer. If enhanced fan-out is enabled and ConsumerName is not specified ApplicationName is used as ConsumerName.
+// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html
+// Note: You can register up to twenty consumers per stream to use enhanced fan-out.
+func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumer(enable bool) *KinesisClientLibConfiguration {
+ c.EnableEnhancedFanOutConsumer = enable
+ return c
+}
+
+// WithEnhancedFanOutConsumerName enables enhanced fan-out consumer with the specified name
+// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html
+// Note: You can register up to twenty consumers per stream to use enhanced fan-out.
+func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumerName(consumerName string) *KinesisClientLibConfiguration {
+ checkIsValueNotEmpty("EnhancedFanOutConsumerName", consumerName)
+ c.EnhancedFanOutConsumerName = consumerName
+ c.EnableEnhancedFanOutConsumer = true
+ return c
+}
+
+// WithEnhancedFanOutConsumerARN enables enhanced fan-out consumer with the specified consumer ARN
+// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html
+// Note: You can register up to twenty consumers per stream to use enhanced fan-out.
+func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumerARN(consumerARN string) *KinesisClientLibConfiguration {
+ checkIsValueNotEmpty("EnhancedFanOutConsumerARN", consumerARN)
+ c.EnhancedFanOutConsumerARN = consumerARN
+ c.EnableEnhancedFanOutConsumer = true
+ return c
+}
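+
+// Illustrative sketch (not part of the library code): enhanced fan-out can be enabled either by
+// consumer name (the consumer is then created automatically) or by the ARN of an already
+// registered consumer. The names and ARN below are placeholders.
+//
+//	byName := NewKinesisClientLibConfig("myApp", "myStream", "us-west-2", "worker-1").
+//		WithEnhancedFanOutConsumerName("my-consumer")
+//
+//	byARN := NewKinesisClientLibConfig("myApp", "myStream", "us-west-2", "worker-2").
+//		WithEnhancedFanOutConsumerARN("arn:aws:kinesis:us-west-2:123456789012:stream/myStream/consumer/my-consumer:1")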
+
+func (c *KinesisClientLibConfiguration) WithLeaseStealing(enableLeaseStealing bool) *KinesisClientLibConfiguration {
+ c.EnableLeaseStealing = enableLeaseStealing
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithLeaseStealingIntervalMillis(leaseStealingIntervalMillis int) *KinesisClientLibConfiguration {
+ c.LeaseStealingIntervalMillis = leaseStealingIntervalMillis
+ return c
+}
+
+func (c *KinesisClientLibConfiguration) WithLeaseSyncingIntervalMillis(leaseSyncingIntervalMillis int) *KinesisClientLibConfiguration {
+ c.LeaseSyncingTimeIntervalMillis = leaseSyncingIntervalMillis
+ return c
+}
diff --git a/clientlibrary/interfaces/inputs.go b/clientlibrary/interfaces/inputs.go
new file mode 100644
index 0000000..2336af8
--- /dev/null
+++ b/clientlibrary/interfaces/inputs.go
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2020 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package interfaces
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package interfaces
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+)
+
+const (
+ /*
+ * REQUESTED Indicates that the entire application is being shut down, and if desired the record processor will be given a
+ * final chance to checkpoint. This state will not trigger a direct call to
+ * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but
+ * instead depend on a different interface for backward compatibility.
+ */
+ REQUESTED ShutdownReason = iota + 1
+
+ /*
+ * Terminate processing for this RecordProcessor (resharding use case).
+ * Indicates that the shard is closed and all records from the shard have been delivered to the application.
+ * Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records
+ * from this shard and processing of child shards can be started.
+ */
+ TERMINATE
+
+ /*
+ * Processing will be moved to a different record processor (fail over, load balancing use cases).
+ * Applications SHOULD NOT checkpoint their progress (as another record processor may have already started
+ * processing data).
+ */
+ ZOMBIE
+)
+
+// Containers for the parameters to the IRecordProcessor
+type (
+ /*
+ * Reason the RecordProcessor is being shutdown.
+ * Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
+ * In case of a fail-over, applications should NOT checkpoint as part of shutdown,
+ * since another record processor may have already started processing records for that shard.
+ * In case of termination (resharding use case), applications SHOULD keep checkpointing their progress to indicate
+ * that they have successfully processed all the records (processing of child shards can then begin).
+ */
+ ShutdownReason int
+
+ InitializationInput struct {
+ // The shardId that the record processor is being initialized for.
+ ShardId string
+
+ // The last extended sequence number that was successfully checkpointed by the previous record processor.
+ ExtendedSequenceNumber *ExtendedSequenceNumber
+ }
+
+ ProcessRecordsInput struct {
+ // The time that this batch of records was received by the KCL.
+ CacheEntryTime *time.Time
+
+ // The time that this batch of records was prepared to be provided to the RecordProcessor.
+ CacheExitTime *time.Time
+
+ // The records received from Kinesis. These records may have been de-aggregated if they were published by the KPL.
+ Records []types.Record
+
+ // A checkpointer that the RecordProcessor can use to checkpoint its progress.
+ Checkpointer IRecordProcessorCheckpointer
+
+ // How far behind this batch of records was when received from Kinesis.
+ MillisBehindLatest int64
+ }
+
+ ShutdownInput struct {
+ // ShutdownReason shows why RecordProcessor is going to be shutdown.
+ ShutdownReason ShutdownReason
+
+ // Checkpointer is used to record the current progress.
+ Checkpointer IRecordProcessorCheckpointer
+ }
+)
+
+var shutdownReasonMap = map[ShutdownReason]*string{
+ REQUESTED: aws.String("REQUESTED"),
+ TERMINATE: aws.String("TERMINATE"),
+ ZOMBIE: aws.String("ZOMBIE"),
+}
+
+func ShutdownReasonMessage(reason ShutdownReason) *string {
+ return shutdownReasonMap[reason]
+}
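+
+// Illustrative sketch (not part of this file): a record processor typically branches on the
+// shutdown reason and checkpoints only on TERMINATE. "myProcessor" and its "lastSequenceNumber"
+// field are hypothetical.
+//
+//	func (p *myProcessor) Shutdown(input *ShutdownInput) {
+//		if input.ShutdownReason == TERMINATE {
+//			// the shard is closed; checkpoint so processing of child shards can begin
+//			_ = input.Checkpointer.Checkpoint(p.lastSequenceNumber)
+//		}
+//		// on ZOMBIE, do not checkpoint: another worker may already be processing the shard
+//	}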
diff --git a/clientlibrary/interfaces/record-processor-checkpointer.go b/clientlibrary/interfaces/record-processor-checkpointer.go
new file mode 100644
index 0000000..cdb1f53
--- /dev/null
+++ b/clientlibrary/interfaces/record-processor-checkpointer.go
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package interfaces
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package interfaces
+
+type (
+ IPreparedCheckpointer interface {
+ GetPendingCheckpoint() *ExtendedSequenceNumber
+
+ // Checkpoint
+ /*
+ * This method will record a pending checkpoint.
+ *
+ * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently.
+ * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency.
+ * @error ShutdownError The record processor instance has been shutdown. Another instance may have
+ * started processing some of these records already.
+ * The application should abort processing via this RecordProcessor instance.
+ * @error InvalidStateError Can't store checkpoint.
+ * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist).
+ * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can
+ * backoff and retry.
+ * @error IllegalArgumentError The sequence number being checkpointed is invalid because it is out of range,
+ * i.e. it is smaller than the last check point value (prepared or committed), or larger than the greatest
+ * sequence number seen by the associated record processor.
+ */
+ Checkpoint() error
+ }
+
+ // IRecordProcessorCheckpointer
+ /*
+ * Used by RecordProcessors when they want to checkpoint their progress.
+ * The Kinesis Client Library will pass an object implementing this interface to RecordProcessors, so they can
+ * checkpoint their progress.
+ */
+ IRecordProcessorCheckpointer interface {
+ // Checkpoint
+ /*
+ * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to
+ * {@link #checkpoint()} but provides the ability to specify the sequence number at which to
+ * checkpoint.
+ *
+ * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover,
+ * the Kinesis Client Library will start fetching records after this sequence number.
+ * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently.
+ * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency.
+ * @error ShutdownError The record processor instance has been shutdown. Another instance may have
+ * started processing some of these records already.
+ * The application should abort processing via this RecordProcessor instance.
+ * @error InvalidStateError Can't store checkpoint.
+ * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist).
+ * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can
+ * backoff and retry.
+ * @error IllegalArgumentError The sequence number is invalid for one of the following reasons:
+ * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the
+ * greatest sequence number seen by the associated record processor.
+ * 2.) It is not a valid sequence number for a record in this shard.
+ */
+ Checkpoint(sequenceNumber *string) error
+
+ // PrepareCheckpoint
+ /**
+ * This method will record a pending checkpoint at the provided sequenceNumber.
+ *
+ * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard.
+
+ * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint.
+ *
+ * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently.
+ * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency.
+ * @error ShutdownError The record processor instance has been shutdown. Another instance may have
+ * started processing some of these records already.
+ * The application should abort processing via this RecordProcessor instance.
+ * @error InvalidStateError Can't store pending checkpoint.
+ * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist).
+ * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The
+ * application can backoff and retry.
+ * @error IllegalArgumentError The sequence number is invalid for one of the following reasons:
+ * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the
+ * greatest sequence number seen by the associated record processor.
+ * 2.) It is not a valid sequence number for a record in this shard.
+ */
+ PrepareCheckpoint(sequenceNumber *string) (IPreparedCheckpointer, error)
+ }
+)
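+
+// Illustrative sketch (not part of this file): a processor usually checkpoints the sequence number
+// of the last record it handled successfully. "rec" and "checkpointer" are assumed to come from a
+// ProcessRecordsInput.
+//
+//	// direct checkpoint at a specific sequence number
+//	_ = checkpointer.Checkpoint(rec.SequenceNumber)
+//
+//	// two-phase variant: prepare first, then commit once side effects are durable
+//	prepared, err := checkpointer.PrepareCheckpoint(rec.SequenceNumber)
+//	if err == nil {
+//		err = prepared.Checkpoint()
+//	}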
diff --git a/clientlibrary/interfaces/record-processor.go b/clientlibrary/interfaces/record-processor.go
new file mode 100644
index 0000000..1c41d56
--- /dev/null
+++ b/clientlibrary/interfaces/record-processor.go
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package interfaces
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package interfaces
+
+type (
+ // IRecordProcessor is the interface for the callback functions invoked by the KCL.
+ // The main task when using the KCL is to provide an implementation of the IRecordProcessor interface.
+ // Note: This is exactly the same interface as Amazon KCL IRecordProcessor v2
+ IRecordProcessor interface {
+ // Initialize
+ /*
+ * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance
+ * (via processRecords).
+ *
+ * @param initializationInput Provides information related to initialization
+ */
+ Initialize(initializationInput *InitializationInput)
+
+ // ProcessRecords
+ /*
+ * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the
+ * application.
+ * Upon fail over, the new instance will get records with sequence number > checkpoint position
+ * for each partition key.
+ *
+ * @param processRecordsInput Provides the records to be processed as well as information and capabilities related
+ * to them (eg checkpointing).
+ */
+ ProcessRecords(processRecordsInput *ProcessRecordsInput)
+
+ // Shutdown
+ /*
+ * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this
+ * RecordProcessor instance.
+ *
+ * Warning
+ *
+ * When the value of {@link ShutdownInput#getShutdownReason()} is
+ * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you
+ * checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress.
+ *
+ * @param shutdownInput
+ * Provides information and capabilities (eg checkpointing) related to shutdown of this record processor.
+ */
+ Shutdown(shutdownInput *ShutdownInput)
+ }
+
+ // IRecordProcessorFactory is the interface for creating IRecordProcessor instances. Each Worker can have multiple
+ // threads for processing shards. Clients can choose to create one processor per shard or to share processors across shards.
+ IRecordProcessorFactory interface {
+
+ // CreateProcessor
+ /*
+ * Returns a record processor to be used for processing data records for an (assigned) shard.
+ *
+ * @return Returns a processor object.
+ */
+ CreateProcessor() IRecordProcessor
+ }
+)
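+
+// Illustrative sketch (not part of this file): a minimal IRecordProcessor and factory, assuming the
+// standard "log" package and the SDK "aws" helper package are imported. The names are hypothetical
+// and a real processor would add error handling and batch its checkpoints.
+//
+//	type printProcessor struct{}
+//
+//	func (p *printProcessor) Initialize(input *InitializationInput) {
+//		log.Printf("initializing shard %s", input.ShardId)
+//	}
+//
+//	func (p *printProcessor) ProcessRecords(input *ProcessRecordsInput) {
+//		for _, r := range input.Records {
+//			log.Printf("record %s: %d bytes", aws.ToString(r.SequenceNumber), len(r.Data))
+//		}
+//		if len(input.Records) > 0 {
+//			_ = input.Checkpointer.Checkpoint(input.Records[len(input.Records)-1].SequenceNumber)
+//		}
+//	}
+//
+//	func (p *printProcessor) Shutdown(input *ShutdownInput) {}
+//
+//	type printProcessorFactory struct{}
+//
+//	func (f *printProcessorFactory) CreateProcessor() IRecordProcessor { return &printProcessor{} }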
diff --git a/clientlibrary/interfaces/sequence-number.go b/clientlibrary/interfaces/sequence-number.go
new file mode 100644
index 0000000..8cec8a9
--- /dev/null
+++ b/clientlibrary/interfaces/sequence-number.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package interfaces
+// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client
+/*
+ * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Amazon Software License (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/asl/
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package interfaces
+
+// ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library.
+//
+// The KPL combines multiple user records into a single Kinesis record. Each user record therefore has an integer
+// sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number
+// is used to checkpoint within an aggregated record.
+type ExtendedSequenceNumber struct {
+ SequenceNumber *string
+ SubSequenceNumber int64
+}
diff --git a/clientlibrary/metrics/cloudwatch/cloudwatch.go b/clientlibrary/metrics/cloudwatch/cloudwatch.go
new file mode 100644
index 0000000..144ce85
--- /dev/null
+++ b/clientlibrary/metrics/cloudwatch/cloudwatch.go
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package cloudwatch
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package cloudwatch
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+
+ cwatch "github.com/aws/aws-sdk-go-v2/service/cloudwatch"
+ "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
+
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+// DefaultCloudwatchMetricsBufferDuration is the maximum time metrics are buffered before being published to CloudWatch.
+const DefaultCloudwatchMetricsBufferDuration = 10 * time.Second
+
+type MonitoringService struct {
+ appName string
+ streamName string
+ workerID string
+ region string
+ credentials *credentials.StaticCredentialsProvider
+ logger logger.Logger
+
+ // control how often to publish to CloudWatch
+ bufferDuration time.Duration
+
+ stop *chan struct{}
+ waitGroup *sync.WaitGroup
+ svc *cwatch.Client
+ shardMetrics *sync.Map
+}
+
+type cloudWatchMetrics struct {
+ sync.Mutex
+
+ processedRecords int64
+ processedBytes int64
+ behindLatestMillis []float64
+ leasesHeld int64
+ leaseRenewals int64
+ getRecordsTime []float64
+ processRecordsTime []float64
+}
+
+// NewMonitoringService returns a Monitoring service publishing metrics to CloudWatch.
+func NewMonitoringService(region string, creds *credentials.StaticCredentialsProvider) *MonitoringService {
+ return NewMonitoringServiceWithOptions(region, creds, logger.GetDefaultLogger(), DefaultCloudwatchMetricsBufferDuration)
+}
+
+// NewMonitoringServiceWithOptions returns a Monitoring service publishing metrics to
+// CloudWatch with the provided credentials, buffering duration and logger.
+func NewMonitoringServiceWithOptions(region string, creds *credentials.StaticCredentialsProvider, logger logger.Logger, bufferDur time.Duration) *MonitoringService {
+ return &MonitoringService{
+ region: region,
+ credentials: creds,
+ logger: logger,
+ bufferDuration: bufferDur,
+ }
+}
+
+func (cw *MonitoringService) Init(appName, streamName, workerID string) error {
+ cw.appName = appName
+ cw.streamName = streamName
+ cw.workerID = workerID
+
+ cfg := &aws.Config{Region: cw.region}
+ cfg.Credentials = cw.credentials
+
+ cw.svc = cwatch.NewFromConfig(*cfg)
+ cw.shardMetrics = &sync.Map{}
+
+ stopChan := make(chan struct{})
+ cw.stop = &stopChan
+ wg := sync.WaitGroup{}
+ cw.waitGroup = &wg
+
+ return nil
+}
+
+func (cw *MonitoringService) Start() error {
+ cw.waitGroup.Add(1)
+ // entering eventloop for sending metrics to CloudWatch
+ go cw.eventloop()
+ return nil
+}
+
+func (cw *MonitoringService) Shutdown() {
+ cw.logger.Infof("Shutting down cloudwatch metrics system...")
+ close(*cw.stop)
+ cw.waitGroup.Wait()
+ cw.logger.Infof("Cloudwatch metrics system has been shutdown.")
+}
+
+// eventloop runs a daemon loop that periodically flushes metrics until stopped
+func (cw *MonitoringService) eventloop() {
+ defer cw.waitGroup.Done()
+
+ for {
+ if err := cw.flush(); err != nil {
+ cw.logger.Errorf("Error sending metrics to CloudWatch. %+v", err)
+ }
+
+ select {
+ case <-*cw.stop:
+ cw.logger.Infof("Shutting down monitoring system")
+ if err := cw.flush(); err != nil {
+ cw.logger.Errorf("Error sending metrics to CloudWatch. %+v", err)
+ }
+ return
+ case <-time.After(cw.bufferDuration):
+ }
+ }
+}
+
+func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool {
+ metric.Lock()
+ defaultDimensions := []types.Dimension{
+ {
+ Name: aws.String("Shard"),
+ Value: &shard,
+ },
+ {
+ Name: aws.String("KinesisStreamName"),
+ Value: &cw.streamName,
+ },
+ }
+
+ leaseDimensions := []types.Dimension{
+ {
+ Name: aws.String("Shard"),
+ Value: &shard,
+ },
+ {
+ Name: aws.String("KinesisStreamName"),
+ Value: &cw.streamName,
+ },
+ {
+ Name: aws.String("WorkerID"),
+ Value: &cw.workerID,
+ },
+ }
+ metricTimestamp := time.Now()
+
+ data := []types.MetricDatum{
+ {
+ Dimensions: defaultDimensions,
+ MetricName: aws.String("RecordsProcessed"),
+ Unit: types.StandardUnitCount,
+ Timestamp: &metricTimestamp,
+ Value: aws.Float64(float64(metric.processedRecords)),
+ },
+ {
+ Dimensions: defaultDimensions,
+ MetricName: aws.String("DataBytesProcessed"),
+ Unit: types.StandardUnitBytes,
+ Timestamp: &metricTimestamp,
+ Value: aws.Float64(float64(metric.processedBytes)),
+ },
+ {
+ Dimensions: leaseDimensions,
+ MetricName: aws.String("RenewLease.Success"),
+ Unit: types.StandardUnitCount,
+ Timestamp: &metricTimestamp,
+ Value: aws.Float64(float64(metric.leaseRenewals)),
+ },
+ {
+ Dimensions: leaseDimensions,
+ MetricName: aws.String("CurrentLeases"),
+ Unit: types.StandardUnitCount,
+ Timestamp: &metricTimestamp,
+ Value: aws.Float64(float64(metric.leasesHeld)),
+ },
+ }
+
+ if len(metric.behindLatestMillis) > 0 {
+ data = append(data, types.MetricDatum{
+ Dimensions: defaultDimensions,
+ MetricName: aws.String("MillisBehindLatest"),
+ Unit: types.StandardUnitMilliseconds,
+ Timestamp: &metricTimestamp,
+ StatisticValues: &types.StatisticSet{
+ SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))),
+ Sum: sumFloat64(metric.behindLatestMillis),
+ Maximum: maxFloat64(metric.behindLatestMillis),
+ Minimum: minFloat64(metric.behindLatestMillis),
+ }})
+ }
+
+ if len(metric.getRecordsTime) > 0 {
+ data = append(data, types.MetricDatum{
+ Dimensions: defaultDimensions,
+ MetricName: aws.String("KinesisDataFetcher.getRecords.Time"),
+ Unit: types.StandardUnitMilliseconds,
+ Timestamp: &metricTimestamp,
+ StatisticValues: &types.StatisticSet{
+ SampleCount: aws.Float64(float64(len(metric.getRecordsTime))),
+ Sum: sumFloat64(metric.getRecordsTime),
+ Maximum: maxFloat64(metric.getRecordsTime),
+ Minimum: minFloat64(metric.getRecordsTime),
+ }})
+ }
+
+ if len(metric.processRecordsTime) > 0 {
+ data = append(data, types.MetricDatum{
+ Dimensions: defaultDimensions,
+ MetricName: aws.String("RecordProcessor.processRecords.Time"),
+ Unit: types.StandardUnitMilliseconds,
+ Timestamp: &metricTimestamp,
+ StatisticValues: &types.StatisticSet{
+ SampleCount: aws.Float64(float64(len(metric.processRecordsTime))),
+ Sum: sumFloat64(metric.processRecordsTime),
+ Maximum: maxFloat64(metric.processRecordsTime),
+ Minimum: minFloat64(metric.processRecordsTime),
+ }})
+ }
+
+ // Publish metrics data to CloudWatch
+ _, err := cw.svc.PutMetricData(context.TODO(), &cwatch.PutMetricDataInput{
+ Namespace: aws.String(cw.appName),
+ MetricData: data,
+ })
+
+ if err == nil {
+ metric.processedRecords = 0
+ metric.processedBytes = 0
+ metric.behindLatestMillis = []float64{}
+ metric.leaseRenewals = 0
+ metric.getRecordsTime = []float64{}
+ metric.processRecordsTime = []float64{}
+ } else {
+ cw.logger.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err)
+ }
+
+ metric.Unlock()
+ return true
+}
+
+func (cw *MonitoringService) flush() error {
+ cw.logger.Debugf("Flushing metrics data. Stream: %s, Worker: %s", cw.streamName, cw.workerID)
+ // publish per shard metrics
+ cw.shardMetrics.Range(func(k, v interface{}) bool {
+ shard, metric := k.(string), v.(*cloudWatchMetrics)
+ return cw.flushShard(shard, metric)
+ })
+
+ return nil
+}
+
+func (cw *MonitoringService) IncrRecordsProcessed(shard string, count int) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.processedRecords += int64(count)
+}
+
+func (cw *MonitoringService) IncrBytesProcessed(shard string, count int64) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.processedBytes += count
+}
+
+func (cw *MonitoringService) MillisBehindLatest(shard string, millSeconds float64) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.behindLatestMillis = append(m.behindLatestMillis, millSeconds)
+}
+
+func (cw *MonitoringService) LeaseGained(shard string) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.leasesHeld++
+}
+
+func (cw *MonitoringService) LeaseLost(shard string) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.leasesHeld--
+}
+
+func (cw *MonitoringService) LeaseRenewed(shard string) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.leaseRenewals++
+}
+
+func (cw *MonitoringService) RecordGetRecordsTime(shard string, time float64) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.getRecordsTime = append(m.getRecordsTime, time)
+}
+func (cw *MonitoringService) RecordProcessRecordsTime(shard string, time float64) {
+ m := cw.getOrCreatePerShardMetrics(shard)
+ m.Lock()
+ defer m.Unlock()
+ m.processRecordsTime = append(m.processRecordsTime, time)
+}
+
+func (cw *MonitoringService) getOrCreatePerShardMetrics(shard string) *cloudWatchMetrics {
+ var i interface{}
+ var ok bool
+ if i, ok = cw.shardMetrics.Load(shard); !ok {
+ m := &cloudWatchMetrics{}
+ cw.shardMetrics.Store(shard, m)
+ return m
+ }
+
+ return i.(*cloudWatchMetrics)
+}
+
+func sumFloat64(slice []float64) *float64 {
+ sum := float64(0)
+ for _, num := range slice {
+ sum += num
+ }
+ return &sum
+}
+
+func maxFloat64(slice []float64) *float64 {
+ if len(slice) < 1 {
+ return aws.Float64(0)
+ }
+ max := slice[0]
+ for _, num := range slice {
+ if num > max {
+ max = num
+ }
+ }
+ return &max
+}
+
+func minFloat64(slice []float64) *float64 {
+ if len(slice) < 1 {
+ return aws.Float64(0)
+ }
+ min := slice[0]
+ for _, num := range slice {
+ if num < min {
+ min = num
+ }
+ }
+ return &min
+}
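
For reviewers, a rough usage sketch of the CloudWatch publisher above; the region, credentials, app/stream/worker names and buffer duration are illustrative only, and the logger helper is the library's existing default:

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/credentials"

	cwmetrics "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/cloudwatch"
	"github.com/vmware/vmware-go-kcl/logger"
)

func main() {
	log := logger.GetDefaultLogger()

	// Illustrative static credentials; in practice these come from your credential chain.
	creds := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")

	// Buffer metrics for 30s between PutMetricData calls instead of the 10s default.
	mService := cwmetrics.NewMonitoringServiceWithOptions("us-west-2", &creds, log, 30*time.Second)

	if err := mService.Init("my-kcl-app", "my-stream", "worker-1"); err != nil {
		log.Errorf("init failed: %+v", err)
		return
	}
	_ = mService.Start() // starts the background flush loop
	defer mService.Shutdown()
}
```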
diff --git a/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go
new file mode 100644
index 0000000..8762a49
--- /dev/null
+++ b/clientlibrary/metrics/interfaces.go
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package metrics
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package metrics
+
+type MonitoringService interface {
+ Init(appName, streamName, workerID string) error
+ Start() error
+ IncrRecordsProcessed(shard string, count int)
+ IncrBytesProcessed(shard string, count int64)
+ MillisBehindLatest(shard string, milliSeconds float64)
+ LeaseGained(shard string)
+ LeaseLost(shard string)
+ LeaseRenewed(shard string)
+ RecordGetRecordsTime(shard string, time float64)
+ RecordProcessRecordsTime(shard string, time float64)
+ Shutdown()
+}
+
+// NoopMonitoringService implements MonitoringService with no-op methods.
+type NoopMonitoringService struct{}
+
+func (NoopMonitoringService) Init(_, _, _ string) error { return nil }
+func (NoopMonitoringService) Start() error { return nil }
+func (NoopMonitoringService) Shutdown() {}
+
+func (NoopMonitoringService) IncrRecordsProcessed(_ string, _ int) {}
+func (NoopMonitoringService) IncrBytesProcessed(_ string, _ int64) {}
+func (NoopMonitoringService) MillisBehindLatest(_ string, _ float64) {}
+func (NoopMonitoringService) LeaseGained(_ string) {}
+func (NoopMonitoringService) LeaseLost(_ string) {}
+func (NoopMonitoringService) LeaseRenewed(_ string) {}
+func (NoopMonitoringService) RecordGetRecordsTime(_ string, _ float64) {}
+func (NoopMonitoringService) RecordProcessRecordsTime(_ string, _ float64) {}
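
Since the worker only depends on this interface, applications can swap in their own implementation. A hedged sketch of a logging-only implementation that embeds the no-op type and overrides a single method (hypothetical, not part of this change):

```go
package main

import (
	"log"

	"github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
)

// logOnlyMonitoring is a hypothetical MonitoringService that logs record counts
// and inherits no-op behaviour for every other metric.
type logOnlyMonitoring struct {
	metrics.NoopMonitoringService
}

func (logOnlyMonitoring) IncrRecordsProcessed(shard string, count int) {
	log.Printf("shard %s: processed %d records", shard, count)
}

func main() {
	var m metrics.MonitoringService = logOnlyMonitoring{}
	_ = m.Init("app", "stream", "worker-1")
	m.IncrRecordsProcessed("shardId-000000000000", 25)
}
```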
diff --git a/clientlibrary/metrics/prometheus/prometheus.go b/clientlibrary/metrics/prometheus/prometheus.go
new file mode 100644
index 0000000..b2b4915
--- /dev/null
+++ b/clientlibrary/metrics/prometheus/prometheus.go
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package prometheus
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package prometheus
+
+import (
+ "net/http"
+
+ prom "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+// MonitoringService publishes KCL metrics to Prometheus.
+// Note: this can be tricky if the service using KCL already registers its own Prometheus metrics.
+type MonitoringService struct {
+ listenAddress string
+ namespace string
+ streamName string
+ workerID string
+ region string
+ logger logger.Logger
+
+ processedRecords *prom.CounterVec
+ processedBytes *prom.CounterVec
+ behindLatestMillis *prom.GaugeVec
+ leasesHeld *prom.GaugeVec
+ leaseRenewals *prom.CounterVec
+ getRecordsTime *prom.HistogramVec
+ processRecordsTime *prom.HistogramVec
+}
+
+// NewMonitoringService returns a Monitoring service publishing metrics to Prometheus.
+func NewMonitoringService(listenAddress, region string, logger logger.Logger) *MonitoringService {
+ return &MonitoringService{
+ listenAddress: listenAddress,
+ region: region,
+ logger: logger,
+ }
+}
+
+func (p *MonitoringService) Init(appName, streamName, workerID string) error {
+ p.namespace = appName
+ p.streamName = streamName
+ p.workerID = workerID
+
+ p.processedBytes = prom.NewCounterVec(prom.CounterOpts{
+ Name: p.namespace + `_processed_bytes`,
+ Help: "Number of bytes processed",
+ }, []string{"kinesisStream", "shard"})
+ p.processedRecords = prom.NewCounterVec(prom.CounterOpts{
+ Name: p.namespace + `_processed_records`,
+ Help: "Number of records processed",
+ }, []string{"kinesisStream", "shard"})
+ p.behindLatestMillis = prom.NewGaugeVec(prom.GaugeOpts{
+ Name: p.namespace + `_behind_latest_millis`,
+ Help: "The amount of milliseconds processing is behind",
+ }, []string{"kinesisStream", "shard"})
+ p.leasesHeld = prom.NewGaugeVec(prom.GaugeOpts{
+ Name: p.namespace + `_leases_held`,
+ Help: "The number of leases held by the worker",
+ }, []string{"kinesisStream", "shard", "workerID"})
+ p.leaseRenewals = prom.NewCounterVec(prom.CounterOpts{
+ Name: p.namespace + `_lease_renewals`,
+ Help: "The number of successful lease renewals",
+ }, []string{"kinesisStream", "shard", "workerID"})
+ p.getRecordsTime = prom.NewHistogramVec(prom.HistogramOpts{
+ Name: p.namespace + `_get_records_duration_milliseconds`,
+ Help: "The time taken to fetch records and process them",
+ }, []string{"kinesisStream", "shard"})
+ p.processRecordsTime = prom.NewHistogramVec(prom.HistogramOpts{
+ Name: p.namespace + `_process_records_duration_milliseconds`,
+ Help: "The time taken to process records",
+ }, []string{"kinesisStream", "shard"})
+
+ metrics := []prom.Collector{
+ p.processedBytes,
+ p.processedRecords,
+ p.behindLatestMillis,
+ p.leasesHeld,
+ p.leaseRenewals,
+ p.getRecordsTime,
+ p.processRecordsTime,
+ }
+ for _, metric := range metrics {
+ err := prom.Register(metric)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (p *MonitoringService) Start() error {
+ http.Handle("/metrics", promhttp.Handler())
+ go func() {
+ p.logger.Infof("Starting Prometheus listener on %s", p.listenAddress)
+ err := http.ListenAndServe(p.listenAddress, nil)
+ if err != nil {
+ p.logger.Errorf("Error starting Prometheus metrics endpoint. %+v", err)
+ }
+ p.logger.Infof("Stopped metrics server")
+ }()
+
+ return nil
+}
+
+func (p *MonitoringService) Shutdown() {}
+
+func (p *MonitoringService) IncrRecordsProcessed(shard string, count int) {
+ p.processedRecords.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Add(float64(count))
+}
+
+func (p *MonitoringService) IncrBytesProcessed(shard string, count int64) {
+ p.processedBytes.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Add(float64(count))
+}
+
+func (p *MonitoringService) MillisBehindLatest(shard string, millSeconds float64) {
+ p.behindLatestMillis.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Set(millSeconds)
+}
+
+func (p *MonitoringService) LeaseGained(shard string) {
+ p.leasesHeld.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Inc()
+}
+
+func (p *MonitoringService) LeaseLost(shard string) {
+ p.leasesHeld.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Dec()
+}
+
+func (p *MonitoringService) LeaseRenewed(shard string) {
+ p.leaseRenewals.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Inc()
+}
+
+func (p *MonitoringService) RecordGetRecordsTime(shard string, time float64) {
+ p.getRecordsTime.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Observe(time)
+}
+
+func (p *MonitoringService) RecordProcessRecordsTime(shard string, time float64) {
+ p.processRecordsTime.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Observe(time)
+}
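
A standalone usage sketch of the Prometheus publisher above (addresses and names are illustrative; note the app name becomes the metric name prefix, so it should avoid characters such as '-' that are invalid in Prometheus metric names):

```go
package main

import (
	"time"

	prommetrics "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/prometheus"
	"github.com/vmware/vmware-go-kcl/logger"
)

func main() {
	log := logger.GetDefaultLogger()

	// Serve metrics on :8080/metrics for an existing Prometheus server to scrape.
	mService := prommetrics.NewMonitoringService(":8080", "us-west-2", log)
	if err := mService.Init("my_kcl_app", "my-stream", "worker-1"); err != nil {
		log.Errorf("init failed: %+v", err)
		return
	}
	_ = mService.Start()

	// Simulate a worker reporting metrics; in real use the KCL worker calls these.
	mService.IncrRecordsProcessed("shardId-000000000000", 10)
	mService.MillisBehindLatest("shardId-000000000000", 1500)

	time.Sleep(time.Minute) // keep the /metrics endpoint up for scraping
}
```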
diff --git a/clientlibrary/partition/partition.go b/clientlibrary/partition/partition.go
new file mode 100644
index 0000000..5524416
--- /dev/null
+++ b/clientlibrary/partition/partition.go
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package partition
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package partition
+
+import (
+ "sync"
+ "time"
+
+ "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+)
+
+type ShardStatus struct {
+ ID string
+ ParentShardId string
+ Checkpoint string
+ AssignedTo string
+ Mux *sync.RWMutex
+ LeaseTimeout time.Time
+ // Shard Range
+ StartingSequenceNumber string
+ // a child shard does not have an ending sequence number yet
+ EndingSequenceNumber string
+ ClaimRequest string
+}
+
+func (ss *ShardStatus) GetLeaseOwner() string {
+ ss.Mux.RLock()
+ defer ss.Mux.RUnlock()
+ return ss.AssignedTo
+}
+
+func (ss *ShardStatus) SetLeaseOwner(owner string) {
+ ss.Mux.Lock()
+ defer ss.Mux.Unlock()
+ ss.AssignedTo = owner
+}
+
+func (ss *ShardStatus) GetCheckpoint() string {
+ ss.Mux.RLock()
+ defer ss.Mux.RUnlock()
+ return ss.Checkpoint
+}
+
+func (ss *ShardStatus) SetCheckpoint(c string) {
+ ss.Mux.Lock()
+ defer ss.Mux.Unlock()
+ ss.Checkpoint = c
+}
+
+func (ss *ShardStatus) GetLeaseTimeout() time.Time {
+ ss.Mux.Lock()
+ defer ss.Mux.Unlock()
+ return ss.LeaseTimeout
+}
+
+func (ss *ShardStatus) SetLeaseTimeout(timeout time.Time) {
+ ss.Mux.Lock()
+ defer ss.Mux.Unlock()
+ ss.LeaseTimeout = timeout
+}
+
+func (ss *ShardStatus) IsClaimRequestExpired(kclConfig *config.KinesisClientLibConfiguration) bool {
+ if leaseTimeout := ss.GetLeaseTimeout(); leaseTimeout.IsZero() {
+ return false
+ } else {
+ return leaseTimeout.
+ Before(time.Now().UTC().Add(time.Duration(-kclConfig.LeaseStealingClaimTimeoutMillis) * time.Millisecond))
+ }
+}
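
To show how the accessors and IsClaimRequestExpired fit together, a small self-contained sketch with made-up values (the 60s claim timeout is an assumption):

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/vmware/vmware-go-kcl/clientlibrary/config"
	par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
)

func main() {
	shard := &par.ShardStatus{
		ID:  "shardId-000000000000",
		Mux: &sync.RWMutex{},
	}

	// A lease held by worker-1 that timed out 90 seconds ago.
	shard.SetLeaseOwner("worker-1")
	shard.SetLeaseTimeout(time.Now().UTC().Add(-90 * time.Second))

	// Assumed config: claim requests older than 60s are considered expired.
	kclConfig := &config.KinesisClientLibConfiguration{
		LeaseStealingClaimTimeoutMillis: 60000,
	}

	fmt.Println("owner:", shard.GetLeaseOwner())
	fmt.Println("claim expired:", shard.IsClaimRequestExpired(kclConfig)) // true for the values above
}
```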
diff --git a/clientlibrary/utils/random.go b/clientlibrary/utils/random.go
new file mode 100644
index 0000000..7c45f91
--- /dev/null
+++ b/clientlibrary/utils/random.go
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package utils
+package utils
+
+import (
+ "crypto/rand"
+ "math/big"
+ "time"
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+// RandStringBytesMaskImpr generates a random string of length n.
+// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
+func RandStringBytesMaskImpr(n int) string {
+ b := make([]byte, n)
+ seed := time.Now().UnixNano()
+ rnd, _ := rand.Int(rand.Reader, big.NewInt(seed))
+ // rnd.Int64() yields 63 random bits, enough for letterIdxMax letters
+ for i, cache, remain := n-1, rnd.Int64(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ rnd, _ = rand.Int(rand.Reader, big.NewInt(seed))
+ cache, remain = rnd.Int64(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+
+ return string(b)
+}
diff --git a/clientlibrary/utils/random_test.go b/clientlibrary/utils/random_test.go
new file mode 100644
index 0000000..1f51f74
--- /dev/null
+++ b/clientlibrary/utils/random_test.go
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package utils
+package utils
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestRandom(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ s1 := RandStringBytesMaskImpr(10)
+ s2 := RandStringBytesMaskImpr(10)
+ if s1 == s2 {
+ t.Fatalf("failed in generating random string. s1: %s, s2: %s", s1, s2)
+ }
+ fmt.Println(s1)
+ fmt.Println(s2)
+ }
+}
+
+func TestRandomNum(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ seed := time.Now().UTC().Second()
+ s1 := RandStringBytesMaskImpr(seed)
+ s2 := RandStringBytesMaskImpr(seed)
+ if s1 == s2 {
+ t.Fatalf("failed in generating random string. s1: %s, s2: %s", s1, s2)
+ }
+ fmt.Println(s1)
+ fmt.Println(s2)
+ }
+}
diff --git a/clientlibrary/utils/uuid.go b/clientlibrary/utils/uuid.go
new file mode 100644
index 0000000..857347c
--- /dev/null
+++ b/clientlibrary/utils/uuid.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package utils
+package utils
+
+import (
+ guuid "github.com/google/uuid"
+)
+
+// MustNewUUID generates a new UUID and panics on failure
+func MustNewUUID() string {
+ id, err := guuid.NewUUID()
+ if err != nil {
+ panic(err)
+ }
+
+ return id.String()
+}
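
A tiny usage sketch; deriving a worker ID from the hostname plus a UUID is just one assumed convention:

```go
package main

import (
	"fmt"
	"os"

	"github.com/vmware/vmware-go-kcl/clientlibrary/utils"
)

func main() {
	host, _ := os.Hostname()
	// Hypothetical convention: hostname-UUID as a unique worker ID.
	workerID := fmt.Sprintf("%s-%s", host, utils.MustNewUUID())
	fmt.Println("worker ID:", workerID)
}
```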
diff --git a/clientlibrary/worker/common-shard-consumer.go b/clientlibrary/worker/common-shard-consumer.go
new file mode 100644
index 0000000..a638f5f
--- /dev/null
+++ b/clientlibrary/worker/common-shard-consumer.go
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package worker
+package worker
+
+import (
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+ deagg "github.com/vmware/vmware-go-kcl/internal/deaggregator"
+)
+
+type shardConsumer interface {
+ getRecords() error
+}
+
+// commonShardConsumer implements common functionality for regular and enhanced fan-out consumers
+type commonShardConsumer struct {
+ shard *par.ShardStatus
+ kc *kinesis.Client
+ checkpointer chk.Checkpointer
+ recordProcessor kcl.IRecordProcessor
+ kclConfig *config.KinesisClientLibConfiguration
+ mService metrics.MonitoringService
+}
+
+// releaseLease releases the shard lease and clears the cached lease owner
+func (sc *commonShardConsumer) releaseLease() {
+ log := sc.kclConfig.Logger
+ log.Infof("Release lease for shard %s", sc.shard.ID)
+ sc.shard.SetLeaseOwner("")
+
+ // Release the lease by wiping out the lease owner for the shard.
+ // Note: nothing needs to be done on error here; the shard lease will eventually expire.
+ if err := sc.checkpointer.RemoveLeaseOwner(sc.shard.ID); err != nil {
+ log.Errorf("Failed to release shard lease for shard: %s Error: %+v", sc.shard.ID, err)
+ }
+
+ // report the lease-lost metric
+ sc.mService.LeaseLost(sc.shard.ID)
+}
+
+// getStartingPosition gets the Kinesis starting position.
+// It first tries to fetch the checkpoint; if no checkpoint is found it falls back to InitialPositionInStream.
+func (sc *commonShardConsumer) getStartingPosition() (*types.StartingPosition, error) {
+ err := sc.checkpointer.FetchCheckpoint(sc.shard)
+ if err != nil && err != chk.ErrSequenceIDNotFound {
+ return nil, err
+ }
+
+ checkpoint := sc.shard.GetCheckpoint()
+ if checkpoint != "" {
+ sc.kclConfig.Logger.Debugf("Start shard: %v at checkpoint: %v", sc.shard.ID, checkpoint)
+ return &types.StartingPosition{
+ Type: types.ShardIteratorTypeAfterSequenceNumber,
+ SequenceNumber: &checkpoint,
+ }, nil
+ }
+
+ shardIteratorType := config.InitalPositionInStreamToShardIteratorType(sc.kclConfig.InitialPositionInStream)
+ sc.kclConfig.Logger.Debugf("No checkpoint recorded for shard: %v, starting with: %v", sc.shard.ID, aws.ToString(shardIteratorType))
+ if sc.kclConfig.InitialPositionInStream == config.AT_TIMESTAMP {
+ return &types.StartingPosition{
+ Type: types.ShardIteratorTypeAtTimestamp,
+ Timestamp: sc.kclConfig.InitialPositionInStreamExtended.Timestamp,
+ }, nil
+ }
+
+ if *shardIteratorType == "TRIM_HORIZON" {
+ return &types.StartingPosition{
+ Type: types.ShardIteratorTypeTrimHorizon,
+ }, nil
+ }
+
+ return &types.StartingPosition{
+ Type: types.ShardIteratorTypeLatest,
+ }, nil
+}
+
+// waitOnParentShard blocks until the parent shard has been fully processed
+func (sc *commonShardConsumer) waitOnParentShard() error {
+ if len(sc.shard.ParentShardId) == 0 {
+ return nil
+ }
+
+ pshard := &par.ShardStatus{
+ ID: sc.shard.ParentShardId,
+ Mux: &sync.RWMutex{},
+ }
+
+ for {
+ if err := sc.checkpointer.FetchCheckpoint(pshard); err != nil {
+ return err
+ }
+
+ // Parent shard is finished.
+ if pshard.GetCheckpoint() == chk.ShardEnd {
+ return nil
+ }
+
+ time.Sleep(time.Duration(sc.kclConfig.ParentShardPollIntervalMillis) * time.Millisecond)
+ }
+}
+
+func (sc *commonShardConsumer) processRecords(getRecordsStartTime time.Time, records []types.Record, millisBehindLatest *int64, recordCheckpointer kcl.IRecordProcessorCheckpointer) {
+ log := sc.kclConfig.Logger
+
+ getRecordsTime := time.Since(getRecordsStartTime).Milliseconds()
+ sc.mService.RecordGetRecordsTime(sc.shard.ID, float64(getRecordsTime))
+
+ log.Debugf("Received %d original records.", len(records))
+
+ // De-aggregate the records if they were published by the KPL.
+ dars, err := deagg.DeaggregateRecords(records)
+ if err != nil {
+ // The error is caused by a misbehaving KPL publisher; skip the bad records
+ // instead of getting stuck here.
+ log.Errorf("Error in de-aggregating KPL records: %+v", err)
+ }
+
+ input := &kcl.ProcessRecordsInput{
+ Records: dars,
+ MillisBehindLatest: *millisBehindLatest,
+ Checkpointer: recordCheckpointer,
+ }
+
+ recordLength := len(input.Records)
+ recordBytes := int64(0)
+ log.Debugf("Received %d de-aggregated records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest)
+
+ for _, r := range input.Records {
+ recordBytes += int64(len(r.Data))
+ }
+
+ if recordLength > 0 || sc.kclConfig.CallProcessRecordsEvenForEmptyRecordList {
+ processRecordsStartTime := time.Now()
+
+ // Deliver the records to the record processor
+ input.CacheEntryTime = &getRecordsStartTime
+ input.CacheExitTime = &processRecordsStartTime
+ sc.recordProcessor.ProcessRecords(input)
+
+ processedRecordsTiming := time.Since(processRecordsStartTime).Milliseconds()
+ sc.mService.RecordProcessRecordsTime(sc.shard.ID, float64(processedRecordsTiming))
+ }
+
+ sc.mService.IncrRecordsProcessed(sc.shard.ID, recordLength)
+ sc.mService.IncrBytesProcessed(sc.shard.ID, recordBytes)
+ sc.mService.MillisBehindLatest(sc.shard.ID, float64(*millisBehindLatest))
+}
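
processRecords hands the de-aggregated batch plus a checkpointer to the application's IRecordProcessor. A hedged sketch of such a processor (hypothetical, for illustration only):

```go
package main

import (
	"fmt"

	kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
)

// printingProcessor is a hypothetical IRecordProcessor that prints record sizes
// and checkpoints after each delivered batch.
type printingProcessor struct {
	lastSequence *string
}

func (p *printingProcessor) Initialize(input *kcl.InitializationInput) {
	fmt.Println("starting on shard", input.ShardId)
}

func (p *printingProcessor) ProcessRecords(input *kcl.ProcessRecordsInput) {
	for _, r := range input.Records {
		fmt.Printf("record %s: %d bytes\n", *r.SequenceNumber, len(r.Data))
		p.lastSequence = r.SequenceNumber
	}
	if p.lastSequence != nil {
		_ = input.Checkpointer.Checkpoint(p.lastSequence) // record progress
	}
}

func (p *printingProcessor) Shutdown(input *kcl.ShutdownInput) {
	if input.ShutdownReason == kcl.TERMINATE {
		_ = input.Checkpointer.Checkpoint(nil) // shard closed: mark SHARD_END
	}
}

func main() {
	// Compile-time check that the sketch satisfies the record processor interface.
	var _ kcl.IRecordProcessor = &printingProcessor{}
}
```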
diff --git a/clientlibrary/worker/fan-out-shard-consumer.go b/clientlibrary/worker/fan-out-shard-consumer.go
new file mode 100644
index 0000000..76827f3
--- /dev/null
+++ b/clientlibrary/worker/fan-out-shard-consumer.go
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package worker
+package worker
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
+)
+
+// FanOutShardConsumer is responsible for consuming data records of a (specified) shard.
+// Note: FanOutShardConsumer only deals with a single shard.
+// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html
+type FanOutShardConsumer struct {
+ commonShardConsumer
+ consumerARN string
+ consumerID string
+ stop *chan struct{}
+}
+
+// getRecords subscribes to a shard and reads events from it.
+// Precondition: it currently has the lease on the shard.
+func (sc *FanOutShardConsumer) getRecords() error {
+ defer sc.releaseLease()
+
+ log := sc.kclConfig.Logger
+
+ // If the shard is child shard, need to wait until the parent finished.
+ if err := sc.waitOnParentShard(); err != nil {
+ // If parent shard has been deleted by Kinesis system already, just ignore the error.
+ if err != chk.ErrSequenceIDNotFound {
+ log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", sc.shard.ParentShardId, err)
+ return err
+ }
+ }
+
+ shardSub, err := sc.subscribeToShard()
+ if err != nil {
+ log.Errorf("Unable to subscribe to shard %s: %v", sc.shard.ID, err)
+ return err
+ }
+ defer func() {
+ if shardSub == nil || shardSub.GetStream() == nil {
+ log.Debugf("Nothing to close, EventStream is nil")
+ return
+ }
+ err = shardSub.GetStream().Close()
+ if err != nil {
+ log.Errorf("Unable to close event stream for %s: %v", sc.shard.ID, err)
+ }
+ }()
+
+ input := &kcl.InitializationInput{
+ ShardId: sc.shard.ID,
+ ExtendedSequenceNumber: &kcl.ExtendedSequenceNumber{SequenceNumber: aws.String(sc.shard.GetCheckpoint())},
+ }
+ sc.recordProcessor.Initialize(input)
+ recordCheckpointer := NewRecordProcessorCheckpoint(sc.shard, sc.checkpointer)
+
+ var continuationSequenceNumber *string
+ refreshLeaseTimer := time.After(time.Until(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)))
+ for {
+ getRecordsStartTime := time.Now()
+ select {
+ case <-*sc.stop:
+ shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer}
+ sc.recordProcessor.Shutdown(shutdownInput)
+ return nil
+ case <-refreshLeaseTimer:
+ log.Debugf("Refreshing lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID)
+ err = sc.checkpointer.GetLease(sc.shard, sc.consumerID)
+ if err != nil {
+ if errors.As(err, &chk.ErrLeaseNotAcquired{}) {
+ log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID)
+ return nil
+ }
+ log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v", sc.shard.ID, sc.consumerID, err)
+ return err
+ }
+ refreshLeaseTimer = time.After(time.Until(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)))
+ case event, ok := <-shardSub.GetStream().Events():
+ if !ok {
+ // need to resubscribe to shard
+ log.Debugf("Event stream ended, refreshing subscription on shard: %s for worker: %s", sc.shard.ID, sc.consumerID)
+ if continuationSequenceNumber == nil || *continuationSequenceNumber == "" {
+ log.Debugf("No continuation sequence number")
+ return nil
+ }
+ shardSub, err = sc.resubscribe(shardSub, continuationSequenceNumber)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ subEvent, ok := event.(*types.SubscribeToShardEventStreamMemberSubscribeToShardEvent)
+ if !ok {
+ log.Errorf("Received unexpected event type: %T", event)
+ continue
+ }
+ continuationSequenceNumber = subEvent.Value.ContinuationSequenceNumber
+ sc.processRecords(getRecordsStartTime, subEvent.Value.Records, subEvent.Value.MillisBehindLatest, recordCheckpointer)
+
+ // The shard has been closed, so no new records can be read from it
+ if continuationSequenceNumber == nil {
+ log.Infof("Shard %s closed", sc.shard.ID)
+ shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.TERMINATE, Checkpointer: recordCheckpointer}
+ sc.recordProcessor.Shutdown(shutdownInput)
+ return nil
+ }
+ }
+ }
+}
+
+func (sc *FanOutShardConsumer) subscribeToShard() (*kinesis.SubscribeToShardOutput, error) {
+ startPosition, err := sc.getStartingPosition()
+ if err != nil {
+ return nil, err
+ }
+
+ return sc.kc.SubscribeToShard(context.TODO(), &kinesis.SubscribeToShardInput{
+ ConsumerARN: &sc.consumerARN,
+ ShardId: &sc.shard.ID,
+ StartingPosition: startPosition,
+ })
+}
+
+func (sc *FanOutShardConsumer) resubscribe(shardSub *kinesis.SubscribeToShardOutput, continuationSequence *string) (*kinesis.SubscribeToShardOutput, error) {
+ err := shardSub.GetStream().Close()
+ if err != nil {
+ sc.kclConfig.Logger.Errorf("Unable to close event stream for %s: %v", sc.shard.ID, err)
+ return nil, err
+ }
+ startPosition := &types.StartingPosition{
+ Type: types.ShardIteratorTypeAfterSequenceNumber,
+ SequenceNumber: continuationSequence,
+ }
+ shardSub, err = sc.kc.SubscribeToShard(context.TODO(), &kinesis.SubscribeToShardInput{
+ ConsumerARN: &sc.consumerARN,
+ ShardId: &sc.shard.ID,
+ StartingPosition: startPosition,
+ })
+ if err != nil {
+ sc.kclConfig.Logger.Errorf("Unable to resubscribe to shard %s: %v", sc.shard.ID, err)
+ return nil, err
+ }
+ return shardSub, nil
+}
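
Applications select this consumer through the worker configuration. A hedged wiring sketch, assuming the library's existing NewKinesisClientLibConfig constructor; EnhancedFanOutConsumerName is the field read by fetchConsumerARN in worker-fan-out.go, while the worker-side switch that actually picks the fan-out consumer lives outside this excerpt:

```go
package main

import (
	"fmt"

	"github.com/vmware/vmware-go-kcl/clientlibrary/config"
)

func main() {
	// Assumes the library's existing constructor; app/stream/region/worker names are illustrative.
	kclConfig := config.NewKinesisClientLibConfig("my-app", "my-stream", "us-west-2", "worker-1")

	// Field read by fetchConsumerARN when describing/registering the fan-out consumer.
	kclConfig.EnhancedFanOutConsumerName = "my-app-consumer"

	fmt.Println("enhanced fan-out consumer:", kclConfig.EnhancedFanOutConsumerName)
}
```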
diff --git a/clientlibrary/worker/polling-shard-consumer.go b/clientlibrary/worker/polling-shard-consumer.go
new file mode 100644
index 0000000..f0a1d9e
--- /dev/null
+++ b/clientlibrary/worker/polling-shard-consumer.go
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package worker
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package worker
+
+import (
+ "context"
+ "errors"
+ "math"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+)
+
+// PollingShardConsumer is responsible for polling data records from a (specified) shard.
+// Note: PollingShardConsumer only deals with a single shard.
+type PollingShardConsumer struct {
+ commonShardConsumer
+ streamName string
+ stop *chan struct{}
+ consumerID string
+ mService metrics.MonitoringService
+}
+
+func (sc *PollingShardConsumer) getShardIterator() (*string, error) {
+ startPosition, err := sc.getStartingPosition()
+ if err != nil {
+ return nil, err
+ }
+
+ shardIterArgs := &kinesis.GetShardIteratorInput{
+ ShardId: &sc.shard.ID,
+ ShardIteratorType: startPosition.Type,
+ StartingSequenceNumber: startPosition.SequenceNumber,
+ Timestamp: startPosition.Timestamp,
+ StreamName: &sc.streamName,
+ }
+
+ iterResp, err := sc.kc.GetShardIterator(context.TODO(), shardIterArgs)
+ if err != nil {
+ return nil, err
+ }
+
+ return iterResp.ShardIterator, nil
+}
+
+// getRecords continuously polls one shard for data records
+// Precondition: it currently has the lease on the shard.
+func (sc *PollingShardConsumer) getRecords() error {
+ defer sc.releaseLease()
+
+ log := sc.kclConfig.Logger
+
+ // If the shard is child shard, need to wait until the parent finished.
+ if err := sc.waitOnParentShard(); err != nil {
+ // If parent shard has been deleted by Kinesis system already, just ignore the error.
+ if err != chk.ErrSequenceIDNotFound {
+ log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", sc.shard.ParentShardId, err)
+ return err
+ }
+ }
+
+ shardIterator, err := sc.getShardIterator()
+ if err != nil {
+ log.Errorf("Unable to get shard iterator for %s: %v", sc.shard.ID, err)
+ return err
+ }
+
+ // Start processing events and notify record processor on shard and starting checkpoint
+ input := &kcl.InitializationInput{
+ ShardId: sc.shard.ID,
+ ExtendedSequenceNumber: &kcl.ExtendedSequenceNumber{SequenceNumber: aws.String(sc.shard.GetCheckpoint())},
+ }
+ sc.recordProcessor.Initialize(input)
+
+ recordCheckpointer := NewRecordProcessorCheckpoint(sc.shard, sc.checkpointer)
+ retriedErrors := 0
+
+ for {
+ if time.Now().UTC().After(sc.shard.GetLeaseTimeout().Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)) {
+ log.Debugf("Refreshing lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID)
+ err = sc.checkpointer.GetLease(sc.shard, sc.consumerID)
+ if err != nil {
+ if errors.As(err, &chk.ErrLeaseNotAcquired{}) {
+ log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID)
+ return nil
+ }
+ // log and return error
+ log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v",
+ sc.shard.ID, sc.consumerID, err)
+ return err
+ }
+ }
+
+ getRecordsStartTime := time.Now()
+
+ log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.ToString(shardIterator))
+ getRecordsArgs := &kinesis.GetRecordsInput{
+ Limit: aws.Int32(int32(sc.kclConfig.MaxRecords)),
+ ShardIterator: shardIterator,
+ }
+
+ // Get records from stream and retry as needed
+ getResp, err := sc.kc.GetRecords(context.TODO(), getRecordsArgs)
+ if err != nil {
+ //aws-sdk-go-v2 https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md#error-handling
+ var throughputExceededErr *types.ProvisionedThroughputExceededException
+ var kmsThrottlingErr *types.KMSThrottlingException
+ if errors.As(err, &throughputExceededErr) || errors.As(err, &kmsThrottlingErr) {
+ log.Errorf("Error getting records from shard %v: %+v", sc.shard.ID, err)
+ retriedErrors++
+ // exponential backoff
+ // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff
+ time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond)
+ continue
+ }
+ log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs)
+ return err
+ }
+ // reset the retry count after success
+ retriedErrors = 0
+
+ sc.processRecords(getRecordsStartTime, getResp.Records, getResp.MillisBehindLatest, recordCheckpointer)
+
+ // The shard has been closed, so no new records can be read from it
+ if getResp.NextShardIterator == nil {
+ log.Infof("Shard %s closed", sc.shard.ID)
+ shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.TERMINATE, Checkpointer: recordCheckpointer}
+ sc.recordProcessor.Shutdown(shutdownInput)
+ return nil
+ }
+ shardIterator = getResp.NextShardIterator
+
+ // Idle between reads; the user is responsible for checkpointing progress.
+ // The idle time only applies when no records were returned; otherwise the
+ // next batch of records is fetched immediately.
+ if len(getResp.Records) == 0 && aws.ToInt64(getResp.MillisBehindLatest) < int64(sc.kclConfig.IdleTimeBetweenReadsInMillis) {
+ time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond)
+ }
+
+ select {
+ case <-*sc.stop:
+ shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer}
+ sc.recordProcessor.Shutdown(shutdownInput)
+ return nil
+ default:
+ }
+ }
+}
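
The throttling retry above sleeps for 2^retries × 100 ms. A tiny standalone sketch of that schedule, capped at five attempts purely for illustration:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Mirrors the backoff used after throttling errors: 2^retries * 100ms.
	for retries := 1; retries <= 5; retries++ {
		delay := time.Duration(math.Exp2(float64(retries))*100) * time.Millisecond
		fmt.Printf("retry %d: sleep %v\n", retries, delay) // 200ms, 400ms, 800ms, 1.6s, 3.2s
	}
}
```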
diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go
new file mode 100644
index 0000000..c89dc4a
--- /dev/null
+++ b/clientlibrary/worker/record-processor-checkpointer.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package worker
+package worker
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+)
+
+type (
+
+ // PreparedCheckpointer
+ /*
+ * Objects of this class are prepared to checkpoint at a specific sequence number. They use an
+ * IRecordProcessorCheckpointer to do the actual checkpointing, so their checkpoint is subject to the same 'didn't go
+ * backwards' validation as a normal checkpoint.
+ */
+ PreparedCheckpointer struct {
+ pendingCheckpointSequenceNumber *kcl.ExtendedSequenceNumber
+ checkpointer kcl.IRecordProcessorCheckpointer
+ }
+
+ // RecordProcessorCheckpointer
+ /*
+ * This type is used to enable record processors to checkpoint their progress.
+ * The Amazon Kinesis Client Library will instantiate an object and provide a reference to the application's
+ * RecordProcessor instance. The Amazon Kinesis Client Library creates one instance per shard assignment.
+ */
+ RecordProcessorCheckpointer struct {
+ shard *par.ShardStatus
+ checkpoint chk.Checkpointer
+ }
+)
+
+func NewRecordProcessorCheckpoint(shard *par.ShardStatus, checkpoint chk.Checkpointer) kcl.IRecordProcessorCheckpointer {
+ return &RecordProcessorCheckpointer{
+ shard: shard,
+ checkpoint: checkpoint,
+ }
+}
+
+func (pc *PreparedCheckpointer) GetPendingCheckpoint() *kcl.ExtendedSequenceNumber {
+ return pc.pendingCheckpointSequenceNumber
+}
+
+func (pc *PreparedCheckpointer) Checkpoint() error {
+ return pc.checkpointer.Checkpoint(pc.pendingCheckpointSequenceNumber.SequenceNumber)
+}
+
+func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error {
+ // checkpoint the last sequence of a closed shard
+ if sequenceNumber == nil {
+ rc.shard.SetCheckpoint(chk.ShardEnd)
+ } else {
+ rc.shard.SetCheckpoint(aws.ToString(sequenceNumber))
+ }
+
+ return rc.checkpoint.CheckpointSequence(rc.shard)
+}
+
+func (rc *RecordProcessorCheckpointer) PrepareCheckpoint(_ *string) (kcl.IPreparedCheckpointer, error) {
+ return &PreparedCheckpointer{}, nil
+}
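
The nil-versus-sequence-number behaviour of Checkpoint is what applications rely on: nil stores the SHARD_END sentinel, anything else stores normal progress. A sketch of the resulting shard state, using the same partition and checkpoint packages (the sequence number is made up):

```go
package main

import (
	"fmt"
	"sync"

	chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
	par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
)

func main() {
	shard := &par.ShardStatus{ID: "shardId-000000000000", Mux: &sync.RWMutex{}}

	// Normal progress: Checkpoint(seq) stores the caller's sequence number on the shard.
	shard.SetCheckpoint("49590338271490256608559692538361571095921575989136588898")
	fmt.Println("checkpoint:", shard.GetCheckpoint())

	// Shard fully consumed: Checkpoint(nil) stores the SHARD_END sentinel instead.
	shard.SetCheckpoint(chk.ShardEnd)
	fmt.Println("checkpoint:", shard.GetCheckpoint())
}
```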
diff --git a/clientlibrary/worker/worker-fan-out.go b/clientlibrary/worker/worker-fan-out.go
new file mode 100644
index 0000000..a7943b3
--- /dev/null
+++ b/clientlibrary/worker/worker-fan-out.go
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package worker
+package worker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+)
+
+// fetchConsumerARNWithRetry tries to fetch the consumer ARN, retrying up to 10 times with exponential backoff on error.
+func (w *Worker) fetchConsumerARNWithRetry() (string, error) {
+ for retry := 0; ; retry++ {
+ consumerARN, err := w.fetchConsumerARN()
+ if err == nil {
+ return consumerARN, nil
+ }
+ if retry < 10 {
+ sleepDuration := time.Duration(math.Exp2(float64(retry))*100) * time.Millisecond
+ w.kclConfig.Logger.Errorf("Could not get consumer ARN: %v, retrying after: %s", err, sleepDuration)
+ time.Sleep(sleepDuration)
+ continue
+ }
+ return consumerARN, err
+ }
+}
+
+// fetchConsumerARN gets the enhanced fan-out consumer ARN,
+// registering the enhanced fan-out consumer if it does not exist yet.
+func (w *Worker) fetchConsumerARN() (string, error) {
+ log := w.kclConfig.Logger
+ log.Debugf("Fetching stream consumer ARN")
+
+ streamDescription, err := w.kc.DescribeStream(context.TODO(), &kinesis.DescribeStreamInput{
+ StreamName: &w.kclConfig.StreamName,
+ })
+
+ if err != nil {
+ log.Errorf("Could not describe stream: %v", err)
+ return "", err
+ }
+
+ streamConsumerDescription, err := w.kc.DescribeStreamConsumer(context.TODO(), &kinesis.DescribeStreamConsumerInput{
+ ConsumerName: &w.kclConfig.EnhancedFanOutConsumerName,
+ StreamARN: streamDescription.StreamDescription.StreamARN,
+ })
+
+ if err == nil {
+ log.Infof("Enhanced fan-out consumer found, consumer status: %s", streamConsumerDescription.ConsumerDescription.ConsumerStatus)
+ if streamConsumerDescription.ConsumerDescription.ConsumerStatus != types.ConsumerStatusActive {
+ return "", fmt.Errorf("consumer is not in active status yet, current status: %s", streamConsumerDescription.ConsumerDescription.ConsumerStatus)
+ }
+ return *streamConsumerDescription.ConsumerDescription.ConsumerARN, nil
+ }
+
+	// aws-sdk-go-v2 error handling: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md#error-handling
+	var notFoundErr *types.ResourceNotFoundException
+	if errors.As(err, &notFoundErr) {
+ log.Infof("Enhanced fan-out consumer not found, registering new consumer with name: %s", w.kclConfig.EnhancedFanOutConsumerName)
+ out, err := w.kc.RegisterStreamConsumer(context.TODO(), &kinesis.RegisterStreamConsumerInput{
+ ConsumerName: &w.kclConfig.EnhancedFanOutConsumerName,
+ StreamARN: streamDescription.StreamDescription.StreamARN,
+ })
+ if err != nil {
+ log.Errorf("Could not register enhanced fan-out consumer: %v", err)
+ return "", err
+ }
+ if out.Consumer.ConsumerStatus != types.ConsumerStatusActive {
+ return "", fmt.Errorf("consumer is not in active status yet, current status: %s", out.Consumer.ConsumerStatus)
+ }
+ return *out.Consumer.ConsumerARN, nil
+ }
+
+	log.Errorf("Could not describe stream consumer: %v", err) // TODO: consider wrapping the underlying error with %w
+
+ return "", err
+}
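
A usage note on the enhanced fan-out path above: the fields the worker consults (EnableEnhancedFanOutConsumer, EnhancedFanOutConsumerName, EnhancedFanOutConsumerARN) are exported configuration fields, so an application can opt in roughly as sketched below. The NewKinesisClientLibConfig constructor call and the concrete values are assumptions for illustration only.

package example

import "github.com/vmware/vmware-go-kcl/clientlibrary/config"

// newFanOutConfig sketches opting into enhanced fan-out. With only a consumer name
// set, the worker describes the stream and registers the consumer on first start;
// supplying a pre-registered consumer ARN skips that round trip entirely.
func newFanOutConfig() *config.KinesisClientLibConfiguration {
	c := config.NewKinesisClientLibConfig("my-app", "my-stream", "us-west-2", "worker-1") // assumed constructor
	c.EnableEnhancedFanOutConsumer = true
	c.EnhancedFanOutConsumerName = "my-app-consumer"
	// c.EnhancedFanOutConsumerARN = "arn:aws:kinesis:..." // optional: bypass registration entirely
	return c
}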
diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go
new file mode 100644
index 0000000..01db8f5
--- /dev/null
+++ b/clientlibrary/worker/worker.go
@@ -0,0 +1,539 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package worker
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package worker
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+)
+
+// Worker is the high-level class that Kinesis applications use to start processing data. It initializes and oversees
+// different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from
+// the shards).
+type Worker struct {
+ streamName string
+ regionName string
+ workerID string
+ consumerARN string
+
+ processorFactory kcl.IRecordProcessorFactory
+ kclConfig *config.KinesisClientLibConfiguration
+ kc *kinesis.Client
+ checkpointer chk.Checkpointer
+ mService metrics.MonitoringService
+
+ stop *chan struct{}
+ waitGroup *sync.WaitGroup
+ done bool
+
+ randomSeed int64
+
+ shardStatus map[string]*par.ShardStatus
+ shardStealInProgress bool
+}
+
+// NewWorker constructs a Worker instance for processing Kinesis stream data.
+func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration) *Worker {
+ mService := kclConfig.MonitoringService
+ if mService == nil {
+		// Replace nil with a no-op monitoring service (does not emit any metrics).
+ mService = metrics.NoopMonitoringService{}
+ }
+
+ return &Worker{
+ streamName: kclConfig.StreamName,
+ regionName: kclConfig.RegionName,
+ workerID: kclConfig.WorkerID,
+ processorFactory: factory,
+ kclConfig: kclConfig,
+ mService: mService,
+ done: false,
+ randomSeed: time.Now().UTC().UnixNano(),
+ }
+}
+
+// WithKinesis is used to provide a Kinesis client, either for a custom implementation or for unit testing.
+func (w *Worker) WithKinesis(svc *kinesis.Client) *Worker {
+ w.kc = svc
+ return w
+}
+
+// WithCheckpointer is used to provide a custom checkpointer implementation (non-DynamoDB)
+// or for unit testing.
+func (w *Worker) WithCheckpointer(checker chk.Checkpointer) *Worker {
+ w.checkpointer = checker
+ return w
+}
+
+// Start starts consuming data from the stream and passes it to the application record processors.
+func (w *Worker) Start() error {
+ log := w.kclConfig.Logger
+ if err := w.initialize(); err != nil {
+ log.Errorf("Failed to initialize Worker: %+v", err)
+ return err
+ }
+
+ // Start monitoring service
+ log.Infof("Starting monitoring service.")
+ if err := w.mService.Start(); err != nil {
+ log.Errorf("Failed to start monitoring service: %+v", err)
+ return err
+ }
+
+ log.Infof("Starting worker event loop.")
+ w.waitGroup.Add(1)
+ go func() {
+ defer w.waitGroup.Done()
+ // entering event loop
+ w.eventLoop()
+ }()
+ return nil
+}
+
+// Shutdown signals the worker to shut down. The worker will try to initiate shutdown of all record processors.
+func (w *Worker) Shutdown() {
+ log := w.kclConfig.Logger
+	log.Infof("Worker shutdown requested.")
+
+ if w.done || w.stop == nil {
+ return
+ }
+
+ close(*w.stop)
+ w.done = true
+ w.waitGroup.Wait()
+
+ w.mService.Shutdown()
+ log.Infof("Worker loop is complete. Exiting from worker.")
+}
+
+// initialize sets up the Kinesis client, the checkpointer, and the worker's internal state.
+func (w *Worker) initialize() error {
+ log := w.kclConfig.Logger
+ log.Infof("Worker initialization in progress...")
+
+ // Create default Kinesis client
+ if w.kc == nil {
+		// build the AWS SDK v2 config and Kinesis client
+ log.Infof("Creating Kinesis client")
+
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: w.kclConfig.KinesisEndpoint,
+ SigningRegion: w.regionName,
+ }, nil
+ })
+
+ cfg, err := awsConfig.LoadDefaultConfig(
+ context.TODO(),
+ awsConfig.WithRegion(w.regionName),
+ awsConfig.WithCredentialsProvider(
+ credentials.NewStaticCredentialsProvider(
+ w.kclConfig.KinesisCredentials.Value.AccessKeyID,
+ w.kclConfig.KinesisCredentials.Value.SecretAccessKey,
+ w.kclConfig.KinesisCredentials.Value.SessionToken)),
+ awsConfig.WithEndpointResolver(resolver),
+ awsConfig.WithRetryer(func() aws.Retryer {
+ return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff)
+ }),
+ )
+
+ if err != nil {
+ // no need to move forward
+ log.Fatalf("Failed in loading Kinesis default config for creating Worker: %+v", err)
+ }
+ w.kc = kinesis.NewFromConfig(cfg)
+ } else {
+ log.Infof("Use custom Kinesis service.")
+ }
+
+ // Create default dynamodb based checkpointer implementation
+ if w.checkpointer == nil {
+ log.Infof("Creating DynamoDB based checkpointer")
+ w.checkpointer = chk.NewDynamoCheckpoint(w.kclConfig)
+ } else {
+ log.Infof("Use custom checkpointer implementation.")
+ }
+
+ if w.kclConfig.EnableEnhancedFanOutConsumer {
+ log.Debugf("Enhanced fan-out is enabled")
+ w.consumerARN = w.kclConfig.EnhancedFanOutConsumerARN
+ if w.consumerARN == "" {
+ var err error
+ w.consumerARN, err = w.fetchConsumerARNWithRetry()
+ if err != nil {
+ log.Errorf("Failed to fetch consumer ARN for: %s, %v", w.kclConfig.EnhancedFanOutConsumerName, err)
+ return err
+ }
+ }
+ }
+
+ err := w.mService.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID)
+ if err != nil {
+ log.Errorf("Failed to start monitoring service: %+v", err)
+ }
+
+ log.Infof("Initializing Checkpointer")
+ if err := w.checkpointer.Init(); err != nil {
+ log.Errorf("Failed to start Checkpointer: %+v", err)
+ return err
+ }
+
+ w.shardStatus = make(map[string]*par.ShardStatus)
+
+ stopChan := make(chan struct{})
+ w.stop = &stopChan
+
+ w.waitGroup = &sync.WaitGroup{}
+
+ log.Infof("Initialization complete.")
+
+ return nil
+}
+
+// newShardConsumer creates a shard consumer for the specified shard
+func (w *Worker) newShardConsumer(shard *par.ShardStatus) shardConsumer {
+ common := commonShardConsumer{
+ shard: shard,
+ kc: w.kc,
+ checkpointer: w.checkpointer,
+ recordProcessor: w.processorFactory.CreateProcessor(),
+ kclConfig: w.kclConfig,
+ mService: w.mService,
+ }
+ if w.kclConfig.EnableEnhancedFanOutConsumer {
+ w.kclConfig.Logger.Infof("Start enhanced fan-out shard consumer for shard: %v", shard.ID)
+ return &FanOutShardConsumer{
+ commonShardConsumer: common,
+ consumerARN: w.consumerARN,
+ consumerID: w.workerID,
+ stop: w.stop,
+ }
+ }
+ w.kclConfig.Logger.Infof("Start polling shard consumer for shard: %v", shard.ID)
+ return &PollingShardConsumer{
+ commonShardConsumer: common,
+ streamName: w.streamName,
+ consumerID: w.workerID,
+ stop: w.stop,
+ mService: w.mService,
+ }
+}
+
+// eventLoop periodically syncs shard information, acquires leases, and launches shard consumers.
+func (w *Worker) eventLoop() {
+ log := w.kclConfig.Logger
+
+ var foundShards int
+ for {
+		// Add [-50%, +50%] random jitter to ShardSyncIntervalMillis. When multiple workers
+		// start at the same time, this decreases the probability of them calling
+		// kinesis.DescribeStream at the same time and hitting the hard limit on AWS API calls.
+		// On average the period remains the same, so behavior is not affected.
+ rnd, _ := rand.Int(rand.Reader, big.NewInt(int64(w.kclConfig.ShardSyncIntervalMillis)))
+ shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + int(rnd.Int64())
+
+ err := w.syncShard()
+ if err != nil {
+ log.Errorf("Error syncing shards: %+v, Retrying in %d ms...", err, shardSyncSleep)
+ time.Sleep(time.Duration(shardSyncSleep) * time.Millisecond)
+ continue
+ }
+
+ if foundShards == 0 || foundShards != len(w.shardStatus) {
+ foundShards = len(w.shardStatus)
+ log.Infof("Found %d shards", foundShards)
+ }
+
+		// Count the number of leases held by this worker, excluding shards that have been fully processed
+ counter := 0
+ for _, shard := range w.shardStatus {
+ if shard.GetLeaseOwner() == w.workerID && shard.GetCheckpoint() != chk.ShardEnd {
+ counter++
+ }
+ }
+
+		// max number of leases has not been reached yet
+ if counter < w.kclConfig.MaxLeasesForWorker {
+ for _, shard := range w.shardStatus {
+ // already owner of the shard
+ if shard.GetLeaseOwner() == w.workerID {
+ continue
+ }
+
+ err := w.checkpointer.FetchCheckpoint(shard)
+ if err != nil {
+					// A missing checkpoint is not an error condition; the shard may simply not have one yet.
+ if err != chk.ErrSequenceIDNotFound {
+ log.Warnf("Couldn't fetch checkpoint: %+v", err)
+ // move on to next shard
+ continue
+ }
+ }
+
+ // The shard is closed and we have processed all records
+ if shard.GetCheckpoint() == chk.ShardEnd {
+ continue
+ }
+
+ var stealShard bool
+ if w.kclConfig.EnableLeaseStealing && shard.ClaimRequest != "" {
+ upcomingStealingInterval := time.Now().UTC().Add(time.Duration(w.kclConfig.LeaseStealingIntervalMillis) * time.Millisecond)
+ if shard.GetLeaseTimeout().Before(upcomingStealingInterval) && !shard.IsClaimRequestExpired(w.kclConfig) {
+ if shard.ClaimRequest == w.workerID {
+ stealShard = true
+ log.Debugf("Stealing shard: %s", shard.ID)
+ } else {
+ log.Debugf("Shard being stolen: %s", shard.ID)
+ continue
+ }
+ }
+ }
+
+ err = w.checkpointer.GetLease(shard, w.workerID)
+ if err != nil {
+ // cannot get lease on the shard
+ if !errors.As(err, &chk.ErrLeaseNotAcquired{}) {
+ log.Errorf("Cannot get lease: %+v", err)
+ }
+ continue
+ }
+
+ if stealShard {
+ log.Debugf("Successfully stole shard: %+v", shard.ID)
+ w.shardStealInProgress = false
+ }
+
+				// log metrics on lease gained
+ w.mService.LeaseGained(shard.ID)
+ w.waitGroup.Add(1)
+ go func(shard *par.ShardStatus) {
+ defer w.waitGroup.Done()
+ if err := w.newShardConsumer(shard).getRecords(); err != nil {
+ log.Errorf("Error in getRecords: %+v", err)
+ }
+ }(shard)
+				// exit the loop so we do not grab more shards for now
+ break
+ }
+ }
+
+ if w.kclConfig.EnableLeaseStealing {
+ err = w.rebalance()
+ if err != nil {
+ log.Warnf("Error in rebalance: %+v", err)
+ }
+ }
+
+ select {
+ case <-*w.stop:
+ log.Infof("Shutting down...")
+ return
+ case <-time.After(time.Duration(shardSyncSleep) * time.Millisecond):
+ log.Debugf("Waited %d ms to sync shards...", shardSyncSleep)
+ }
+ }
+}
+
+func (w *Worker) rebalance() error {
+ log := w.kclConfig.Logger
+
+ workers, err := w.checkpointer.ListActiveWorkers(w.shardStatus)
+ if err != nil {
+ log.Debugf("Error listing workers. workerID: %s. Error: %+v ", w.workerID, err)
+ return err
+ }
+
+	// Only attempt to steal one shard at a time, to allow for linear convergence
+ if w.shardStealInProgress {
+ shardInfo := make(map[string]bool)
+ err := w.getShardIDs("", shardInfo)
+ if err != nil {
+ return err
+ }
+ for _, shard := range w.shardStatus {
+ if shard.ClaimRequest != "" && shard.ClaimRequest == w.workerID {
+ log.Debugf("Steal in progress. workerID: %s", w.workerID)
+ return nil
+ }
+ // Our shard steal was stomped on by a Checkpoint.
+ // We could deal with that, but instead just try again
+ w.shardStealInProgress = false
+ }
+ }
+
+ var numShards int
+ for _, shards := range workers {
+ numShards += len(shards)
+ }
+
+ numWorkers := len(workers)
+
+ // 1:1 shards to workers is optimal, so we cannot possibly rebalance
+ if numWorkers >= numShards {
+ log.Debugf("Optimal shard allocation, not stealing any shards. workerID: %s, %v > %v. ", w.workerID, numWorkers, numShards)
+ return nil
+ }
+
+ currentShards, ok := workers[w.workerID]
+ var numCurrentShards int
+ if !ok {
+ numCurrentShards = 0
+ numWorkers++
+ } else {
+ numCurrentShards = len(currentShards)
+ }
+
+ optimalShards := numShards / numWorkers
+
+	// We already have at least the optimal number of shards, so no rebalancing can take place
+ if numCurrentShards >= optimalShards || numCurrentShards == w.kclConfig.MaxLeasesForWorker {
+ log.Debugf("We have enough shards, not attempting to steal any. workerID: %s", w.workerID)
+ return nil
+ }
+
+ var workerSteal string
+ for worker, shards := range workers {
+ if worker != w.workerID && len(shards) > optimalShards {
+ workerSteal = worker
+ optimalShards = len(shards)
+ }
+ }
+	// Not all shards are allocated, so fall back to the default shard allocation mechanism
+ if workerSteal == "" {
+ log.Infof("Not all shards are allocated, not stealing any. workerID: %s", w.workerID)
+ return nil
+ }
+
+ // Steal a random shard from the worker with the most shards
+ w.shardStealInProgress = true
+ rnd, _ := rand.Int(rand.Reader, big.NewInt(int64(len(workers[workerSteal]))))
+ randIndex := int(rnd.Int64())
+ shardToSteal := workers[workerSteal][randIndex]
+ log.Debugf("Stealing shard %s from %s", shardToSteal, workerSteal)
+
+ err = w.checkpointer.ClaimShard(w.shardStatus[shardToSteal.ID], w.workerID)
+ if err != nil {
+ w.shardStealInProgress = false
+ return err
+ }
+ return nil
+}
+
+// getShardIDs lists all shards and stores them in the shardStatus table.
+// If a shard has been removed, it must be excluded from the cached shard status.
+func (w *Worker) getShardIDs(nextToken string, shardInfo map[string]bool) error {
+ log := w.kclConfig.Logger
+
+ args := &kinesis.ListShardsInput{}
+
+	// When a NextToken is supplied, the StreamName must not be set
+ if nextToken != "" {
+ args.NextToken = aws.String(nextToken)
+ } else {
+ args.StreamName = aws.String(w.streamName)
+ }
+
+ listShards, err := w.kc.ListShards(context.TODO(), args)
+ if err != nil {
+ log.Errorf("Error in ListShards: %s Error: %+v Request: %s", w.streamName, err, args)
+ return err
+ }
+
+ for _, s := range listShards.Shards {
+		// record the shard IDs available from the fresh read from Kinesis
+ shardInfo[*s.ShardId] = true
+
+ // found new shard
+ if _, ok := w.shardStatus[*s.ShardId]; !ok {
+ log.Infof("Found new shard with id %s", *s.ShardId)
+ w.shardStatus[*s.ShardId] = &par.ShardStatus{
+ ID: *s.ShardId,
+ ParentShardId: aws.ToString(s.ParentShardId),
+ Mux: &sync.RWMutex{},
+ StartingSequenceNumber: aws.ToString(s.SequenceNumberRange.StartingSequenceNumber),
+ EndingSequenceNumber: aws.ToString(s.SequenceNumberRange.EndingSequenceNumber),
+ }
+ }
+ }
+
+ if listShards.NextToken != nil {
+ err := w.getShardIDs(aws.ToString(listShards.NextToken), shardInfo)
+ if err != nil {
+ log.Errorf("Error in ListShards: %s Error: %+v Request: %s", w.streamName, err, args)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// syncShard syncs the cached shard info with the actual shard info from Kinesis.
+func (w *Worker) syncShard() error {
+ log := w.kclConfig.Logger
+ shardInfo := make(map[string]bool)
+ err := w.getShardIDs("", shardInfo)
+
+ if err != nil {
+ return err
+ }
+
+ for _, shard := range w.shardStatus {
+		// The cached shard no longer exists; remove it.
+ if _, ok := shardInfo[shard.ID]; !ok {
+ // remove the shard from local status cache
+ delete(w.shardStatus, shard.ID)
+ // remove the shard entry in dynamoDB as well
+			// Note: syncShard runs periodically, so we don't need to do anything on error here.
+ if err := w.checkpointer.RemoveLeaseInfo(shard.ID); err != nil {
+ log.Errorf("Failed to remove shard lease info: %s Error: %+v", shard.ID, err)
+ }
+ }
+ }
+
+ return nil
+}
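
To round out the Worker API added in this file, here is a minimal lifecycle sketch: construct the worker, Start it, and Shutdown when done. Only NewWorker, WithCheckpointer, Start, and Shutdown from this file are used; the run wrapper, its parameters, and the context handling are illustrative assumptions.

package example

import (
	"context"

	chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
	"github.com/vmware/vmware-go-kcl/clientlibrary/config"
	kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
	wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
)

// run starts a worker and blocks until ctx is cancelled, then shuts it down.
// factory is the application's record-processor factory; checkpointer is an
// optional override (e.g. a non-DynamoDB implementation or a test double).
func run(ctx context.Context, factory kcl.IRecordProcessorFactory,
	cfg *config.KinesisClientLibConfiguration, checkpointer chk.Checkpointer) error {

	w := wk.NewWorker(factory, cfg)
	if checkpointer != nil {
		w = w.WithCheckpointer(checkpointer)
	}
	if err := w.Start(); err != nil {
		return err
	}
	defer w.Shutdown()

	<-ctx.Done()
	return nil
}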
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..d6133f7
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,49 @@
+module github.com/vmware/vmware-go-kcl
+
+go 1.17
+
+require (
+ github.com/aws/aws-sdk-go-v2 v1.11.0
+ github.com/aws/aws-sdk-go-v2/config v1.10.0
+ github.com/aws/aws-sdk-go-v2/credentials v1.6.0
+ github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0
+ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0
+ github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0
+ github.com/golang/protobuf v1.5.2
+ github.com/google/uuid v1.3.0
+ github.com/prometheus/client_golang v1.11.0
+ github.com/prometheus/common v0.32.1
+ github.com/rs/zerolog v1.26.0
+ github.com/sirupsen/logrus v1.8.1
+ github.com/stretchr/testify v1.7.0
+ go.uber.org/zap v1.19.1
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
+)
+
+require (
+ github.com/BurntSushi/toml v0.4.1 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.9.0 // indirect
+ github.com/aws/smithy-go v1.9.0 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/procfs v0.7.3 // indirect
+ go.uber.org/atomic v1.9.0 // indirect
+ go.uber.org/multierr v1.7.0 // indirect
+ golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e // indirect
+ google.golang.org/protobuf v1.27.1 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..a170946
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,550 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/aws/aws-sdk-go-v2 v1.11.0 h1:HxyD62DyNhCfiFGUHqJ/xITD6rAjJ7Dm/2nLxLmO4Ag=
+github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM=
+github.com/aws/aws-sdk-go-v2/config v1.10.0 h1:4i+/7DmCQCAls5Z61giur0LOPZ3PXFwnSIw7hRamzws=
+github.com/aws/aws-sdk-go-v2/config v1.10.0/go.mod h1:xuqoV5etD3N3B8Ts9je4ijgAv6mb+6NiOPFMUhwRcjA=
+github.com/aws/aws-sdk-go-v2/credentials v1.6.0 h1:L3O6osQTlzLKRmiTphw2QJuD21EFapWCX4IipiRJhAE=
+github.com/aws/aws-sdk-go-v2/credentials v1.6.0/go.mod h1:rQkYdQPDXRrvPLeEuCNwSgtwMzBo9eDGWlTNC69Sh/0=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 h1:OpZjuUy8Jt3CA1WgJgBC5Bz+uOjE5Ppx4NFTRaooUuA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 h1:zY8cNmbBXt3pzjgWgdIbzpQ6qxoCwt+Nx9JbrAf2mbY=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 h1:Z3aR/OXBnkYK9zXkNkfitHX6SmUBzSsx8VMHbH4Lvhw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 h1:c10Z7fWxtJCoyc8rv06jdh9xrKnu7bAJiRaKWvTb2mU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg=
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0 h1:MNNV0fi3J5Lxxhx8iDlKdRZJrtBv/0FyganA3nBYe8Q=
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0/go.mod h1:Oiwhs3Fo9amYOGsJggWBPU6bwa/u0xVpEdOS5HlouPg=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0 h1:S3X6RWl0TfMxNXsIzz8r3Y6YVA1HWGSx6M345Q3mQ+I=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0/go.mod h1:Hh0zJ3419ET9xQBeR+y0lHIkObJwAKPbzV9nTZ0yrJ0=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0 h1:A2aUh9d38A2ECh76ahOQUdpJFe+Jhjk8qrfV+YbNYGY=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0/go.mod h1:5h2rxfLN22pLTQ1ZoOza87rp2SnN/9UDYdYBQRmIrsE=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 h1:qGZWS/WgiFY+Zgad2u0gwBHpJxz6Ne401JE7iQI1nKs=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0 h1:Cz26j4wGD1tJ2w/M8iLhaS81AkAGY3gEYRt0xQWjEIs=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0/go.mod h1:QyNCg1xtWFJVL++i6ZyVcwXZCiKTNeXHH9zZu3NHOdU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 h1:JDgKIUZOmLFu/Rv6zXLrVTWCmzA0jcTdvsT8iFIKrAI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM=
+github.com/aws/aws-sdk-go-v2/service/sts v1.9.0 h1:rBLCnL8hQ7Sv1S4XCPYgTMI7Uhg81BkvzIiK+/of2zY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.9.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE=
+github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58=
+github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE=
+github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
+go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e h1:zeJt6jBtVDK23XK9QXcmG0FvO0elikp0dYZQZOeL1y0=
+golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/deaggregator/deaggregator.go b/internal/deaggregator/deaggregator.go
new file mode 100644
index 0000000..91a5ad5
--- /dev/null
+++ b/internal/deaggregator/deaggregator.go
@@ -0,0 +1,96 @@
+// Package deaggregator provides helpers for expanding KPL aggregated Kinesis records into their individual user records.
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package deaggregator
+
+import (
+ "crypto/md5"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+ "github.com/golang/protobuf/proto"
+
+ rec "github.com/vmware/vmware-go-kcl/internal/records"
+)
+
+// KplMagicHeader is the magic file header that marks a KPL aggregated record.
+var KplMagicHeader = fmt.Sprintf("%q", []byte("\xf3\x89\x9a\xc2"))
+
+const (
+ KplMagicLen = 4 // Length of magic header for KPL Aggregate Record checking.
+ DigestSize = 16 // MD5 Message size for protobuf.
+)
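+
+// A KPL aggregated record is laid out as:
+//
+//   4-byte magic header | protobuf-encoded AggregatedRecord | 16-byte MD5 digest of the protobuf payload
+//
+// (see https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md)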
+
+// DeaggregateRecords takes a slice of Kinesis records and expands any KPL
+// aggregated (protobuf-encoded) records within it, returning a slice of all
+// resulting records.
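+//
+// A minimal usage sketch (assuming "out" holds the output of a prior Kinesis
+// GetRecords call and "process" is a placeholder for application logic):
+//
+//   expanded, err := DeaggregateRecords(out.Records)
+//   if err != nil {
+//       return err
+//   }
+//   for _, r := range expanded {
+//       process(r.Data)
+//   }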
+func DeaggregateRecords(records []types.Record) ([]types.Record, error) {
+ var isAggregated bool
+ allRecords := make([]types.Record, 0)
+
+ for _, record := range records {
+ isAggregated = true
+
+ var dataMagic string
+ var decodedDataNoMagic []byte
+ // Check if record is long enough to have magic file header
+ if len(record.Data) >= KplMagicLen {
+ dataMagic = fmt.Sprintf("%q", record.Data[:KplMagicLen])
+ decodedDataNoMagic = record.Data[KplMagicLen:]
+ } else {
+ isAggregated = false
+ }
+
+ // Check if record has KPL Aggregate Record Magic Header and data length
+ // is correct size
+ if KplMagicHeader != dataMagic || len(decodedDataNoMagic) <= DigestSize {
+ isAggregated = false
+ }
+
+ if isAggregated {
+ messageDigest := fmt.Sprintf("%x", decodedDataNoMagic[len(decodedDataNoMagic)-DigestSize:])
+ messageData := decodedDataNoMagic[:len(decodedDataNoMagic)-DigestSize]
+
+ calculatedDigest := fmt.Sprintf("%x", md5.Sum(messageData))
+
+ // Check protobuf MD5 hash matches MD5 sum of record
+ if messageDigest != calculatedDigest {
+ isAggregated = false
+ } else {
+ aggRecord := &rec.AggregatedRecord{}
+ err := proto.Unmarshal(messageData, aggRecord)
+
+ if err != nil {
+ return nil, err
+ }
+
+ partitionKeys := aggRecord.PartitionKeyTable
+
+ for _, aggrec := range aggRecord.Records {
+ newRecord := createUserRecord(partitionKeys, aggrec, record)
+ allRecords = append(allRecords, newRecord)
+ }
+ }
+ }
+
+ if !isAggregated {
+ allRecords = append(allRecords, record)
+ }
+ }
+
+ return allRecords, nil
+}
+
+// createUserRecord takes the partition key table of the aggregated record, the
+// individual deaggregated record, and the original aggregated record, builds a
+// types.Record from them, and returns it.
+func createUserRecord(partitionKeys []string, aggRec *rec.Record, record types.Record) types.Record {
+ partitionKey := partitionKeys[*aggRec.PartitionKeyIndex]
+
+ return types.Record{
+ ApproximateArrivalTimestamp: record.ApproximateArrivalTimestamp,
+ Data: aggRec.Data,
+ EncryptionType: record.EncryptionType,
+ PartitionKey: &partitionKey,
+ SequenceNumber: record.SequenceNumber,
+ }
+}
diff --git a/internal/deaggregator/deaggregator_test.go b/internal/deaggregator/deaggregator_test.go
new file mode 100644
index 0000000..a933baa
--- /dev/null
+++ b/internal/deaggregator/deaggregator_test.go
@@ -0,0 +1,202 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package deaggregator_test
+
+import (
+ "crypto/md5"
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+ "github.com/golang/protobuf/proto"
+ "github.com/stretchr/testify/assert"
+
+ deagg "github.com/vmware/vmware-go-kcl/internal/deaggregator"
+ rec "github.com/vmware/vmware-go-kcl/internal/records"
+)
+
+// Generate an aggregate record in the correct AWS-specified format
+// https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md
+func generateAggregateRecord(numRecords int) []byte {
+
+ aggr := &rec.AggregatedRecord{}
+ // Start with the magic header
+ aggRecord := []byte("\xf3\x89\x9a\xc2")
+ partKeyTable := make([]string, 0)
+
+ // Create proto record with numRecords length
+ for i := 0; i < numRecords; i++ {
+ var partKey uint64
+ var hashKey uint64
+ partKey = uint64(i)
+ hashKey = uint64(i) * uint64(10)
+ r := &rec.Record{
+ PartitionKeyIndex: &partKey,
+ ExplicitHashKeyIndex: &hashKey,
+ Data: []byte("Some test data string"),
+ Tags: make([]*rec.Tag, 0),
+ }
+
+ aggr.Records = append(aggr.Records, r)
+ partKeyVal := "test" + fmt.Sprint(i)
+ partKeyTable = append(partKeyTable, partKeyVal)
+ }
+
+ aggr.PartitionKeyTable = partKeyTable
+ // Marshal to protobuf record, create md5 sum from proto record
+ // and append both to aggRecord with magic header
+ data, _ := proto.Marshal(aggr)
+ md5Hash := md5.Sum(data)
+ aggRecord = append(aggRecord, data...)
+ aggRecord = append(aggRecord, md5Hash[:]...)
+ return aggRecord
+}
+
+// Generate a generic types.Record using whatever []byte
+// is passed in as the data (can be a normal []byte or a proto record)
+func generateKinesisRecord(data []byte) types.Record {
+ currentTime := time.Now()
+ encryptionType := types.EncryptionTypeNone
+ partitionKey := "1234"
+ sequenceNumber := "21269319989900637946712965403778482371"
+ return types.Record{
+ ApproximateArrivalTimestamp: &currentTime,
+ Data: data,
+ EncryptionType: encryptionType,
+ PartitionKey: &partitionKey,
+ SequenceNumber: &sequenceNumber,
+ }
+}
+
+// This test makes sure that records shorter than the magic header are handled
+// without slicing out of bounds and are returned as-is.
+func TestSmallLengthReturnsCorrectNumberOfDeaggregatedRecords(t *testing.T) {
+ var err error
+ var kr types.Record
+
+ krs := make([]types.Record, 0, 1)
+
+ smallByte := []byte("No")
+ kr = generateKinesisRecord(smallByte)
+ krs = append(krs, kr)
+ dars, err := deagg.DeaggregateRecords(krs)
+ if err != nil {
+ panic(err)
+ }
+
+ // Small byte test: since this is not an aggregated record, DeaggregateRecords
+ // should return 1 record in the array.
+ assert.Equal(t, 1, len(dars), "Small Byte test should return length of 1.")
+}
+
+// This function tests to make sure that the data starts with the correct magic header
+// according to KPL aggregate documentation.
+func TestNonMatchingMagicHeaderReturnsSingleRecord(t *testing.T) {
+ var err error
+ var kr types.Record
+
+ krs := make([]types.Record, 0, 1)
+
+ min := 1
+ max := 10
+ n := rand.Intn(max-min) + min
+ aggData := generateAggregateRecord(n)
+ mismatchAggData := aggData[1:]
+ kr = generateKinesisRecord(mismatchAggData)
+
+ krs = append(krs, kr)
+
+ dars, err := deagg.DeaggregateRecords(krs)
+ if err != nil {
+ panic(err)
+ }
+
+ // A byte record with a magic header that does not match 0xF3 0x89 0x9A 0xC2
+ // should return a single record.
+ assert.Equal(t, 1, len(dars), "Mismatch magic header test should return length of 1.")
+}
+
+// This function tests that the DeaggregateRecords function returns the correct number of
+// deaggregated records from a single aggregated record.
+func TestVariableLengthRecordsReturnsCorrectNumberOfDeaggregatedRecords(t *testing.T) {
+ var err error
+ var kr types.Record
+
+ krs := make([]types.Record, 0, 1)
+
+ min := 1
+ max := 10
+ n := rand.Intn(max-min) + min
+ aggData := generateAggregateRecord(n)
+ kr = generateKinesisRecord(aggData)
+ krs = append(krs, kr)
+
+ dars, err := deagg.DeaggregateRecords(krs)
+ if err != nil {
+ panic(err)
+ }
+
+ // The variable-length aggregate record test contains n aggregated records and
+ // should return n deaggregated records.
+ assertMsg := fmt.Sprintf("Variable Length Aggregate Record should return length %v.", len(dars))
+ assert.Equal(t, n, len(dars), assertMsg)
+}
+
+// This function tests the length of the message after the magic file header. If the length
+// is less than the digest size (16 bytes), it is not an aggregated record.
+func TestRecordAfterMagicHeaderWithLengthLessThanDigestSizeReturnsSingleRecord(t *testing.T) {
+ var err error
+ var kr types.Record
+
+ krs := make([]types.Record, 0, 1)
+
+ min := 1
+ max := 10
+ n := rand.Intn(max-min) + min
+ aggData := generateAggregateRecord(n)
+ // Change size of proto message to 15
+ reducedAggData := aggData[:19]
+ kr = generateKinesisRecord(reducedAggData)
+
+ krs = append(krs, kr)
+
+ dars, err := deagg.DeaggregateRecords(krs)
+ if err != nil {
+ panic(err)
+ }
+
+ // A byte record with length less than 16 after the magic header should return
+ // a single record from DeaggregateRecords
+ assert.Equal(t, 1, len(dars), "Digest size test should return length of 1.")
+}
+
+// This function tests the MD5 sum at the end of the record by comparing it with
+// the MD5 sum of the protobuf message. If they do not match, it is not an
+// aggregated record.
+func TestRecordWithMismatchMd5SumReturnsSingleRecord(t *testing.T) {
+ var err error
+ var kr types.Record
+
+ krs := make([]types.Record, 0, 1)
+
+ min := 1
+ max := 10
+ n := rand.Intn(max-min) + min
+ aggData := generateAggregateRecord(n)
+ // Remove last byte from array to mismatch the MD5 sums
+ mismatchAggData := aggData[:len(aggData)-1]
+ kr = generateKinesisRecord(mismatchAggData)
+
+ krs = append(krs, kr)
+
+ dars, err := deagg.DeaggregateRecords(krs)
+ if err != nil {
+ panic(err)
+ }
+
+ // A byte record with an MD5 sum that does not match with the md5.Sum(record)
+ // will be marked as a non-aggregate record and return a single record
+ assert.Equal(t, 1, len(dars), "Mismatch md5 sum test should return length of 1.")
+}
diff --git a/internal/records/records.pb.go b/internal/records/records.pb.go
new file mode 100644
index 0000000..89abba5
--- /dev/null
+++ b/internal/records/records.pb.go
@@ -0,0 +1,215 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: records.proto
+
+package records
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AggregatedRecord struct {
+ PartitionKeyTable []string `protobuf:"bytes,1,rep,name=partition_key_table,json=partitionKeyTable" json:"partition_key_table,omitempty"`
+ ExplicitHashKeyTable []string `protobuf:"bytes,2,rep,name=explicit_hash_key_table,json=explicitHashKeyTable" json:"explicit_hash_key_table,omitempty"`
+ Records []*Record `protobuf:"bytes,3,rep,name=records" json:"records,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AggregatedRecord) Reset() { *m = AggregatedRecord{} }
+func (m *AggregatedRecord) String() string { return proto.CompactTextString(m) }
+func (*AggregatedRecord) ProtoMessage() {}
+func (*AggregatedRecord) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6ae0159314830e16, []int{0}
+}
+
+func (m *AggregatedRecord) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AggregatedRecord.Unmarshal(m, b)
+}
+func (m *AggregatedRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AggregatedRecord.Marshal(b, m, deterministic)
+}
+func (m *AggregatedRecord) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AggregatedRecord.Merge(m, src)
+}
+func (m *AggregatedRecord) XXX_Size() int {
+ return xxx_messageInfo_AggregatedRecord.Size(m)
+}
+func (m *AggregatedRecord) XXX_DiscardUnknown() {
+ xxx_messageInfo_AggregatedRecord.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AggregatedRecord proto.InternalMessageInfo
+
+func (m *AggregatedRecord) GetPartitionKeyTable() []string {
+ if m != nil {
+ return m.PartitionKeyTable
+ }
+ return nil
+}
+
+func (m *AggregatedRecord) GetExplicitHashKeyTable() []string {
+ if m != nil {
+ return m.ExplicitHashKeyTable
+ }
+ return nil
+}
+
+func (m *AggregatedRecord) GetRecords() []*Record {
+ if m != nil {
+ return m.Records
+ }
+ return nil
+}
+
+type Tag struct {
+ Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Tag) Reset() { *m = Tag{} }
+func (m *Tag) String() string { return proto.CompactTextString(m) }
+func (*Tag) ProtoMessage() {}
+func (*Tag) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6ae0159314830e16, []int{1}
+}
+
+func (m *Tag) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Tag.Unmarshal(m, b)
+}
+func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Tag.Marshal(b, m, deterministic)
+}
+func (m *Tag) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Tag.Merge(m, src)
+}
+func (m *Tag) XXX_Size() int {
+ return xxx_messageInfo_Tag.Size(m)
+}
+func (m *Tag) XXX_DiscardUnknown() {
+ xxx_messageInfo_Tag.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Tag proto.InternalMessageInfo
+
+func (m *Tag) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *Tag) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Record struct {
+ PartitionKeyIndex *uint64 `protobuf:"varint,1,req,name=partition_key_index,json=partitionKeyIndex" json:"partition_key_index,omitempty"`
+ ExplicitHashKeyIndex *uint64 `protobuf:"varint,2,opt,name=explicit_hash_key_index,json=explicitHashKeyIndex" json:"explicit_hash_key_index,omitempty"`
+ Data []byte `protobuf:"bytes,3,req,name=data" json:"data,omitempty"`
+ Tags []*Tag `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Record) Reset() { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage() {}
+func (*Record) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6ae0159314830e16, []int{2}
+}
+
+func (m *Record) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Record.Unmarshal(m, b)
+}
+func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Record.Marshal(b, m, deterministic)
+}
+func (m *Record) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Record.Merge(m, src)
+}
+func (m *Record) XXX_Size() int {
+ return xxx_messageInfo_Record.Size(m)
+}
+func (m *Record) XXX_DiscardUnknown() {
+ xxx_messageInfo_Record.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Record proto.InternalMessageInfo
+
+func (m *Record) GetPartitionKeyIndex() uint64 {
+ if m != nil && m.PartitionKeyIndex != nil {
+ return *m.PartitionKeyIndex
+ }
+ return 0
+}
+
+func (m *Record) GetExplicitHashKeyIndex() uint64 {
+ if m != nil && m.ExplicitHashKeyIndex != nil {
+ return *m.ExplicitHashKeyIndex
+ }
+ return 0
+}
+
+func (m *Record) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *Record) GetTags() []*Tag {
+ if m != nil {
+ return m.Tags
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*AggregatedRecord)(nil), "AggregatedRecord")
+ proto.RegisterType((*Tag)(nil), "Tag")
+ proto.RegisterType((*Record)(nil), "Record")
+}
+
+func init() { proto.RegisterFile("records.proto", fileDescriptor_6ae0159314830e16) }
+
+var fileDescriptor_6ae0159314830e16 = []byte{
+ // 245 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x8f, 0xc1, 0x4a, 0xc4, 0x30,
+ 0x10, 0x86, 0xc9, 0x26, 0xba, 0x74, 0x54, 0x58, 0xe3, 0x82, 0x39, 0xd6, 0x9e, 0x72, 0xb1, 0x07,
+ 0xc1, 0x07, 0xf0, 0xa6, 0x78, 0x0b, 0xbd, 0x97, 0x71, 0x3b, 0xa4, 0x61, 0xcb, 0xb6, 0xa4, 0x51,
+ 0xb6, 0xef, 0xa2, 0xef, 0x2a, 0x49, 0xdd, 0x45, 0x51, 0x6f, 0x93, 0xf9, 0xf9, 0x32, 0xff, 0x07,
+ 0x17, 0x9e, 0x36, 0xbd, 0x6f, 0xc6, 0x72, 0xf0, 0x7d, 0xe8, 0x8b, 0x77, 0x06, 0xab, 0x07, 0x6b,
+ 0x3d, 0x59, 0x0c, 0xd4, 0x98, 0x94, 0xc9, 0x12, 0xae, 0x06, 0xf4, 0xc1, 0x05, 0xd7, 0xef, 0xea,
+ 0x2d, 0x4d, 0x75, 0xc0, 0x97, 0x8e, 0x14, 0xcb, 0xb9, 0xce, 0xcc, 0xe5, 0x31, 0x7a, 0xa6, 0xa9,
+ 0x8a, 0x81, 0xbc, 0x87, 0x6b, 0xda, 0x0f, 0x9d, 0xdb, 0xb8, 0x50, 0xb7, 0x38, 0xb6, 0xdf, 0x98,
+ 0x45, 0x62, 0xd6, 0x87, 0xf8, 0x11, 0xc7, 0xf6, 0x88, 0xdd, 0xc0, 0xf2, 0xab, 0x8c, 0xe2, 0x39,
+ 0xd7, 0x67, 0x77, 0xcb, 0x72, 0x2e, 0x60, 0x0e, 0xfb, 0xe2, 0x16, 0x78, 0x85, 0x56, 0xae, 0x80,
+ 0x6f, 0x69, 0x52, 0x2c, 0x5f, 0xe8, 0xcc, 0xc4, 0x51, 0xae, 0xe1, 0xe4, 0x0d, 0xbb, 0xd7, 0x78,
+ 0x80, 0xe9, 0xcc, 0xcc, 0x8f, 0xe2, 0x83, 0xc1, 0xe9, 0x7f, 0x0e, 0x6e, 0xd7, 0xd0, 0x3e, 0x7d,
+ 0x21, 0x7e, 0x3a, 0x3c, 0xc5, 0xe0, 0x6f, 0x87, 0x99, 0x89, 0x27, 0xc4, 0x2f, 0x87, 0x19, 0x93,
+ 0x20, 0x1a, 0x0c, 0xa8, 0x78, 0xbe, 0xd0, 0xe7, 0x26, 0xcd, 0x52, 0x81, 0x08, 0x68, 0x47, 0x25,
+ 0x92, 0x94, 0x28, 0x2b, 0xb4, 0x26, 0x6d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x87, 0x3e, 0x63,
+ 0x69, 0x7d, 0x01, 0x00, 0x00,
+}
diff --git a/logger/logger.go b/logger/logger.go
new file mode 100644
index 0000000..1712899
--- /dev/null
+++ b/logger/logger.go
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/
+// https://github.com/amitrai48/logger
+
+package logger
+
+import (
+ "github.com/sirupsen/logrus"
+)
+
+// Fields Type to pass when we want to call WithFields for structured logging
+type Fields map[string]interface{}
+
+const (
+ //Debug has verbose message
+ Debug = "debug"
+ //Info is default log level
+ Info = "info"
+ //Warn is for logging messages about possible issues
+ Warn = "warn"
+ //Error is for logging errors
+ Error = "error"
+ //Fatal is for logging fatal messages. The system shuts down after logging the message.
+ Fatal = "fatal"
+)
+
+// Logger is the common interface for logging.
+type Logger interface {
+ Debugf(format string, args ...interface{})
+
+ Infof(format string, args ...interface{})
+
+ Warnf(format string, args ...interface{})
+
+ Errorf(format string, args ...interface{})
+
+ Fatalf(format string, args ...interface{})
+
+ Panicf(format string, args ...interface{})
+
+ WithFields(keyValues Fields) Logger
+}
+
+// Configuration stores the config for the logger.
+// For some loggers, only one level can apply across all writers; in that case the Console level is used by default.
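+//
+// A minimal usage sketch with the logrus-backed constructor from this package
+// (the values below are illustrative, not defaults):
+//
+//   config := Configuration{
+//       EnableConsole:     true,
+//       ConsoleLevel:      Info,
+//       ConsoleJSONFormat: false,
+//       EnableFile:        true,
+//       FileLevel:         Debug,
+//       FileJSONFormat:    true,
+//       Filename:          "kcl.log",
+//   }
+//   log := NewLogrusLoggerWithConfig(config)
+//   log.WithFields(Fields{"worker": "worker-1"}).Infof("initialized")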
+type Configuration struct {
+ EnableConsole bool
+ ConsoleJSONFormat bool
+ ConsoleLevel string
+ EnableFile bool
+ FileJSONFormat bool
+ FileLevel string
+
+ // Filename is the file to write logs to. Backup log files will be retained
+ // in the same directory. If empty, logs go to <processname>-lumberjack.log
+ // in os.TempDir().
+ Filename string
+
+ // MaxSize is the maximum size in megabytes of the log file before it gets
+ // rotated. It defaults to 100 megabytes.
+ MaxSizeMB int
+
+ // MaxAge is the maximum number of days to retain old log files based on the
+ // timestamp encoded in their filename. Note that a day is defined as 24
+ // hours and may not exactly correspond to calendar days due to daylight
+ // savings, leap seconds, etc. The default is 7 days.
+ MaxAgeDays int
+
+ // MaxBackups is the maximum number of old log files to retain. The default
+ // is to retain all old log files (though MaxAge may still cause them to get
+ // deleted.)
+ MaxBackups int
+
+ // LocalTime determines if the time used for formatting the timestamps in
+ // backup files is the computer's local time. The default is to use UTC
+ // time.
+ LocalTime bool
+}
+
+// GetDefaultLogger creates a default logger.
+func GetDefaultLogger() Logger {
+ return NewLogrusLogger(logrus.StandardLogger())
+}
+
+// normalizeConfig enforces default values in the configuration.
+func normalizeConfig(config *Configuration) {
+ if config.MaxSizeMB <= 0 {
+ config.MaxSizeMB = 100
+ }
+
+ if config.MaxAgeDays <= 0 {
+ config.MaxAgeDays = 7
+ }
+
+ if config.MaxBackups < 0 {
+ config.MaxBackups = 0
+ }
+}
diff --git a/logger/logger_test.go b/logger/logger_test.go
new file mode 100644
index 0000000..980c022
--- /dev/null
+++ b/logger/logger_test.go
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/
+
+package logger
+
+import (
+ "testing"
+
+ "github.com/sirupsen/logrus"
+)
+
+func TestLogrusLoggerWithConfig(t *testing.T) {
+ config := Configuration{
+ EnableConsole: true,
+ ConsoleLevel: Debug,
+ ConsoleJSONFormat: false,
+ EnableFile: false,
+ FileLevel: Info,
+ FileJSONFormat: true,
+ }
+
+ log := NewLogrusLoggerWithConfig(config)
+
+ contextLogger := log.WithFields(Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with logrus")
+ contextLogger.Infof("Logrus is awesome")
+}
+
+func TestLogrusLogger(t *testing.T) {
+ // adapts to Logger interface
+ log := NewLogrusLogger(logrus.StandardLogger())
+
+ contextLogger := log.WithFields(Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with logrus")
+ contextLogger.Infof("Logrus is awesome")
+}
diff --git a/logger/logrus.go b/logger/logrus.go
new file mode 100644
index 0000000..e4f7a67
--- /dev/null
+++ b/logger/logrus.go
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/
+// https://github.com/amitrai48/logger
+
+package logger
+
+import (
+ "io"
+ "os"
+
+ "github.com/sirupsen/logrus"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+)
+
+type LogrusLogEntry struct {
+ entry *logrus.Entry
+}
+
+type LogrusLogger struct {
+ logger logrus.FieldLogger
+}
+
+// NewLogrusLogger adapts an existing logrus logger to the Logger interface.
+// The caller is responsible for configuring the logrus logger appropriately.
+func NewLogrusLogger(lLogger logrus.FieldLogger) Logger {
+ return &LogrusLogger{
+ logger: lLogger,
+ }
+}
+
+// NewLogrusLoggerWithConfig creates and configures a Logger instance backed by
+// a logrus logger.
+func NewLogrusLoggerWithConfig(config Configuration) Logger {
+ logLevel := config.ConsoleLevel
+ if logLevel == "" {
+ logLevel = config.FileLevel
+ }
+
+ level, err := logrus.ParseLevel(logLevel)
+ if err != nil {
+ // fallback to InfoLevel
+ level = logrus.InfoLevel
+ }
+
+ normalizeConfig(&config)
+
+ stdOutHandler := os.Stdout
+ fileHandler := &lumberjack.Logger{
+ Filename: config.Filename,
+ MaxSize: config.MaxSizeMB,
+ Compress: true,
+ MaxAge: config.MaxAgeDays,
+ MaxBackups: config.MaxBackups,
+ LocalTime: config.LocalTime,
+ }
+ lLogger := &logrus.Logger{
+ Out: stdOutHandler,
+ Formatter: getFormatter(config.ConsoleJSONFormat),
+ Hooks: make(logrus.LevelHooks),
+ Level: level,
+ }
+
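+ // Route the output: write to both sinks when console and file are enabled;
+ // write only to the file (with the file formatter) when just the file sink is
+ // enabled. With only the console enabled, the logger above already writes to
+ // stdout with the console formatter.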
+ if config.EnableConsole && config.EnableFile {
+ lLogger.SetOutput(io.MultiWriter(stdOutHandler, fileHandler))
+ } else {
+ if config.EnableFile {
+ lLogger.SetOutput(fileHandler)
+ lLogger.SetFormatter(getFormatter(config.FileJSONFormat))
+ }
+ }
+
+ return &LogrusLogger{
+ logger: lLogger,
+ }
+}
+
+func (l *LogrusLogger) Debugf(format string, args ...interface{}) {
+ l.logger.Debugf(format, args...)
+}
+
+func (l *LogrusLogger) Infof(format string, args ...interface{}) {
+ l.logger.Infof(format, args...)
+}
+
+func (l *LogrusLogger) Warnf(format string, args ...interface{}) {
+ l.logger.Warnf(format, args...)
+}
+
+func (l *LogrusLogger) Errorf(format string, args ...interface{}) {
+ l.logger.Errorf(format, args...)
+}
+
+func (l *LogrusLogger) Fatalf(format string, args ...interface{}) {
+ l.logger.Fatalf(format, args...)
+}
+
+func (l *LogrusLogger) Panicf(format string, args ...interface{}) {
+ l.logger.Panicf(format, args...)
+}
+
+func (l *LogrusLogger) WithFields(fields Fields) Logger {
+ return &LogrusLogEntry{
+ entry: l.logger.WithFields(convertToLogrusFields(fields)),
+ }
+}
+
+func (l *LogrusLogEntry) Debugf(format string, args ...interface{}) {
+ l.entry.Debugf(format, args...)
+}
+
+func (l *LogrusLogEntry) Infof(format string, args ...interface{}) {
+ l.entry.Infof(format, args...)
+}
+
+func (l *LogrusLogEntry) Warnf(format string, args ...interface{}) {
+ l.entry.Warnf(format, args...)
+}
+
+func (l *LogrusLogEntry) Errorf(format string, args ...interface{}) {
+ l.entry.Errorf(format, args...)
+}
+
+func (l *LogrusLogEntry) Fatalf(format string, args ...interface{}) {
+ l.entry.Fatalf(format, args...)
+}
+
+func (l *LogrusLogEntry) Panicf(format string, args ...interface{}) {
+ l.entry.Panicf(format, args...)
+}
+
+func (l *LogrusLogEntry) WithFields(fields Fields) Logger {
+ return &LogrusLogEntry{
+ entry: l.entry.WithFields(convertToLogrusFields(fields)),
+ }
+}
+
+func getFormatter(isJSON bool) logrus.Formatter {
+ if isJSON {
+ return &logrus.JSONFormatter{}
+ }
+ return &logrus.TextFormatter{
+ FullTimestamp: true,
+ DisableLevelTruncation: true,
+ }
+}
+
+func convertToLogrusFields(fields Fields) logrus.Fields {
+ logrusFields := logrus.Fields{}
+ for index, val := range fields {
+ logrusFields[index] = val
+ }
+ return logrusFields
+}
diff --git a/logger/zap/zap.go b/logger/zap/zap.go
new file mode 100644
index 0000000..237303e
--- /dev/null
+++ b/logger/zap/zap.go
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/
+// https://github.com/amitrai48/logger
+
+package zap
+
+import (
+ "os"
+
+ "github.com/vmware/vmware-go-kcl/logger"
+ uzap "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ lumberjack "gopkg.in/natefinch/lumberjack.v2"
+)
+
+type ZapLogger struct {
+ sugaredLogger *uzap.SugaredLogger
+}
+
+// NewZapLogger adapts an existing sugared zap logger to the Logger interface.
+// The caller is responsible for configuring the sugared zap logger appropriately.
+//
+// Note: Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+//
+// A base zap logger can be converted to a SugaredLogger by calling Sugar() to add a wrapper:
+// sugaredLogger := log.Sugar()
+//
+func NewZapLogger(logger *uzap.SugaredLogger) logger.Logger {
+ return &ZapLogger{
+ sugaredLogger: logger,
+ }
+}
+
+// NewZapLoggerWithConfig creates and configures a Logger instance backed by
+// a zap SugaredLogger.
+func NewZapLoggerWithConfig(config logger.Configuration) logger.Logger {
+ cores := []zapcore.Core{}
+
+ if config.EnableConsole {
+ level := getZapLevel(config.ConsoleLevel)
+ writer := zapcore.Lock(os.Stdout)
+ core := zapcore.NewCore(getEncoder(config.ConsoleJSONFormat), writer, level)
+ cores = append(cores, core)
+ }
+
+ if config.EnableFile {
+ level := getZapLevel(config.FileLevel)
+ writer := zapcore.AddSync(&lumberjack.Logger{
+ Filename: config.Filename,
+ MaxSize: config.MaxSizeMB,
+ Compress: true,
+ MaxAge: config.MaxAgeDays,
+ MaxBackups: config.MaxBackups,
+ LocalTime: config.LocalTime,
+ })
+ core := zapcore.NewCore(getEncoder(config.FileJSONFormat), writer, level)
+ cores = append(cores, core)
+ }
+
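+ // NewTee duplicates every log entry to each configured core (console and/or file).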
+ combinedCore := zapcore.NewTee(cores...)
+
+ // AddCallerSkip skips 2 callers; this is important, otherwise the caller
+ // reported in the log would always be this wrapper file (zap.go).
+ logger := uzap.New(combinedCore,
+ uzap.AddCallerSkip(2),
+ uzap.AddCaller(),
+ ).Sugar()
+
+ return &ZapLogger{
+ sugaredLogger: logger,
+ }
+}
+
+func (l *ZapLogger) Debugf(format string, args ...interface{}) {
+ l.sugaredLogger.Debugf(format, args...)
+}
+
+func (l *ZapLogger) Infof(format string, args ...interface{}) {
+ l.sugaredLogger.Infof(format, args...)
+}
+
+func (l *ZapLogger) Warnf(format string, args ...interface{}) {
+ l.sugaredLogger.Warnf(format, args...)
+}
+
+func (l *ZapLogger) Errorf(format string, args ...interface{}) {
+ l.sugaredLogger.Errorf(format, args...)
+}
+
+func (l *ZapLogger) Fatalf(format string, args ...interface{}) {
+ l.sugaredLogger.Fatalf(format, args...)
+}
+
+func (l *ZapLogger) Panicf(format string, args ...interface{}) {
+ l.sugaredLogger.Panicf(format, args...)
+}
+
+func (l *ZapLogger) WithFields(fields logger.Fields) logger.Logger {
+ var f = make([]interface{}, 0)
+ for k, v := range fields {
+ f = append(f, k)
+ f = append(f, v)
+ }
+ newLogger := l.sugaredLogger.With(f...)
+ return &ZapLogger{newLogger}
+}
+
+func getEncoder(isJSON bool) zapcore.Encoder {
+ encoderConfig := uzap.NewProductionEncoderConfig()
+ encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ if isJSON {
+ return zapcore.NewJSONEncoder(encoderConfig)
+ }
+ return zapcore.NewConsoleEncoder(encoderConfig)
+}
+
+func getZapLevel(level string) zapcore.Level {
+ switch level {
+ case logger.Info:
+ return zapcore.InfoLevel
+ case logger.Warn:
+ return zapcore.WarnLevel
+ case logger.Debug:
+ return zapcore.DebugLevel
+ case logger.Error:
+ return zapcore.ErrorLevel
+ case logger.Fatal:
+ return zapcore.FatalLevel
+ default:
+ return zapcore.InfoLevel
+ }
+}
diff --git a/logger/zap/zap_test.go b/logger/zap/zap_test.go
new file mode 100644
index 0000000..820f31b
--- /dev/null
+++ b/logger/zap/zap_test.go
@@ -0,0 +1,39 @@
+package zap_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/vmware/vmware-go-kcl/logger"
+ "github.com/vmware/vmware-go-kcl/logger/zap"
+ uzap "go.uber.org/zap"
+)
+
+func TestZapLoggerWithConfig(t *testing.T) {
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: true,
+ EnableFile: false,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+
+ log := zap.NewZapLoggerWithConfig(config)
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with zap")
+ contextLogger.Infof("Zap is awesome")
+}
+
+func TestZapLogger(t *testing.T) {
+ zapLogger, err := uzap.NewProduction()
+ assert.Nil(t, err)
+
+ log := zap.NewZapLogger(zapLogger.Sugar())
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with zap")
+ contextLogger.Infof("Zap is awesome")
+}
diff --git a/logger/zerolog/zerolog.go b/logger/zerolog/zerolog.go
new file mode 100644
index 0000000..412540f
--- /dev/null
+++ b/logger/zerolog/zerolog.go
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/
+// https://github.com/amitrai48/logger
+
+// Package zerolog implements the KCL logger interface using the rs/zerolog logger
+package zerolog
+
+import (
+ "github.com/rs/zerolog"
+ "github.com/vmware/vmware-go-kcl/logger"
+ "gopkg.in/natefinch/lumberjack.v2"
+ "os"
+)
+
+type zeroLogger struct {
+ log zerolog.Logger
+}
+
+// NewZerologLogger creates a new logger.Logger backed by RS Zerolog using a default config
+func NewZerologLogger() logger.Logger {
+ return NewZerologLoggerWithConfig(logger.Configuration{
+ EnableConsole: true,
+ ConsoleJSONFormat: true,
+ ConsoleLevel: logger.Info,
+ EnableFile: false,
+ FileJSONFormat: false,
+ FileLevel: logger.Info,
+ Filename: "",
+ MaxSizeMB: 0,
+ MaxAgeDays: 0,
+ MaxBackups: 0,
+ LocalTime: true,
+ })
+}
+
+// NewZerologLoggerWithConfig creates a new logger.Logger backed by RS Zerolog using the provided config
+func NewZerologLoggerWithConfig(config logger.Configuration) logger.Logger {
+ var consoleHandler *zerolog.ConsoleWriter
+ var fileHandler *lumberjack.Logger
+ var finalLogger zerolog.Logger
+
+ normalizeConfig(&config)
+
+ if config.EnableConsole {
+ consoleHandler = &zerolog.ConsoleWriter{Out: os.Stdout}
+ }
+
+ if config.EnableFile {
+ fileHandler = &lumberjack.Logger{
+ Filename: config.Filename,
+ MaxSize: config.MaxSizeMB,
+ Compress: true,
+ MaxAge: config.MaxAgeDays,
+ MaxBackups: config.MaxBackups,
+ LocalTime: config.LocalTime,
+ }
+ }
+
+ if config.EnableConsole && config.EnableFile {
+ multi := zerolog.MultiLevelWriter(consoleHandler, fileHandler)
+ finalLogger = zerolog.New(multi).Level(getZeroLogLevel(config.ConsoleLevel)).With().Timestamp().Logger()
+ } else if config.EnableFile {
+ finalLogger = zerolog.New(fileHandler).Level(getZeroLogLevel(config.FileLevel)).With().Timestamp().Logger()
+ } else {
+ finalLogger = zerolog.New(consoleHandler).Level(getZeroLogLevel(config.ConsoleLevel)).With().Timestamp().Logger()
+ }
+
+ return &zeroLogger{log: finalLogger}
+}
+
+func (z *zeroLogger) Debugf(format string, args ...interface{}) {
+ z.log.Debug().Msgf(format, args...)
+}
+
+func (z *zeroLogger) Infof(format string, args ...interface{}) {
+ z.log.Info().Msgf(format, args...)
+}
+
+func (z *zeroLogger) Warnf(format string, args ...interface{}) {
+ z.log.Warn().Msgf(format, args...)
+}
+
+func (z *zeroLogger) Errorf(format string, args ...interface{}) {
+ z.log.Error().Msgf(format, args...)
+}
+
+func (z *zeroLogger) Fatalf(format string, args ...interface{}) {
+ z.log.Fatal().Msgf(format, args...)
+}
+
+func (z *zeroLogger) Panicf(format string, args ...interface{}) {
+ z.log.Panic().Msgf(format, args...)
+}
+
+func (z *zeroLogger) WithFields(keyValues logger.Fields) logger.Logger {
+ ctx := z.log.With()
+ for k, v := range keyValues {
+ // zerolog.Context is immutable, so the result of Interface must be kept,
+ // otherwise the field is silently dropped.
+ ctx = ctx.Interface(k, v)
+ }
+
+ return &zeroLogger{
+ log: ctx.Logger(),
+ }
+}
+
+func getZeroLogLevel(level string) zerolog.Level {
+ switch level {
+ case logger.Info:
+ return zerolog.InfoLevel
+ case logger.Warn:
+ return zerolog.WarnLevel
+ case logger.Debug:
+ return zerolog.DebugLevel
+ case logger.Error:
+ return zerolog.ErrorLevel
+ case logger.Fatal:
+ return zerolog.FatalLevel
+ default:
+ return zerolog.InfoLevel
+ }
+}
+
+func normalizeConfig(config *logger.Configuration) {
+ if config.MaxSizeMB <= 0 {
+ config.MaxSizeMB = 100
+ }
+
+ if config.MaxAgeDays <= 0 {
+ config.MaxAgeDays = 7
+ }
+
+ if config.MaxBackups < 0 {
+ config.MaxBackups = 0
+ }
+}
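+
+// Example usage (sketch): a file-backed logger with rotation handled by lumberjack.
+// Size/age/backup fields left at zero fall back to the defaults set in normalizeConfig
+// above (100 MB, 7 days). The file path is illustrative.
+//
+//	log := zerolog.NewZerologLoggerWithConfig(logger.Configuration{
+//		EnableFile:     true,
+//		FileLevel:      logger.Info,
+//		FileJSONFormat: false,
+//		Filename:       "/tmp/kcl-zerolog-log.log",
+//	})
+//	log.WithFields(logger.Fields{"key1": "value1"}).Infof("Zerolog is awesome")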
diff --git a/logger/zerolog/zerolog_test.go b/logger/zerolog/zerolog_test.go
new file mode 100644
index 0000000..7d35aea
--- /dev/null
+++ b/logger/zerolog/zerolog_test.go
@@ -0,0 +1,32 @@
+package zerolog
+
+import (
+ "github.com/vmware/vmware-go-kcl/logger"
+ "testing"
+)
+
+func TestZeroLogLoggerWithConfig(t *testing.T) {
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: true,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: false,
+ Filename: "/tmp/kcl-zerolog-log.log",
+ }
+
+ log := NewZerologLoggerWithConfig(config)
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with rs zerolog")
+ contextLogger.Infof("Rs zerolog is awesome")
+}
+
+func TestZeroLogLogger(t *testing.T) {
+ log := NewZerologLogger()
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with zerolog")
+ contextLogger.Infof("Zerolog is awesome")
+}
diff --git a/test/lease_stealing_util_test.go b/test/lease_stealing_util_test.go
new file mode 100644
index 0000000..cbd01aa
--- /dev/null
+++ b/test/lease_stealing_util_test.go
@@ -0,0 +1,230 @@
+package test
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/stretchr/testify/assert"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
+)
+
+type LeaseStealingTest struct {
+ t *testing.T
+ config *TestClusterConfig
+ cluster *TestCluster
+ kc *kinesis.Client
+ dc *dynamodb.Client
+
+ backOffSeconds int
+ maxRetries int
+}
+
+func NewLeaseStealingTest(t *testing.T, config *TestClusterConfig, workerFactory TestWorkerFactory) *LeaseStealingTest {
+ cluster := NewTestCluster(t, config, workerFactory)
+ clientConfig := cluster.workerFactory.CreateKCLConfig("test-client", config)
+ return &LeaseStealingTest{
+ t: t,
+ config: config,
+ cluster: cluster,
+ kc: NewKinesisClient(t, config.regionName, clientConfig.KinesisEndpoint, clientConfig.KinesisCredentials),
+ dc: NewDynamoDBClient(t, config.regionName, clientConfig.DynamoDBEndpoint, clientConfig.KinesisCredentials),
+ backOffSeconds: 5,
+ maxRetries: 60,
+ }
+}
+
+func (lst *LeaseStealingTest) WithBackoffSeconds(backoff int) *LeaseStealingTest {
+ lst.backOffSeconds = backoff
+ return lst
+}
+
+func (lst *LeaseStealingTest) WithMaxRetries(retries int) *LeaseStealingTest {
+ lst.maxRetries = retries
+ return lst
+}
+
+func (lst *LeaseStealingTest) publishSomeData() (stop func()) {
+ done := make(chan int)
+ wg := &sync.WaitGroup{}
+
+ wg.Add(1)
+ go func() {
+ ticker := time.NewTicker(500 * time.Millisecond)
+ defer wg.Done()
+ defer ticker.Stop()
+ for {
+ select {
+ case <-done:
+ return
+ case <-ticker.C:
+ lst.t.Log("Coninuously publishing records")
+ publishSomeData(lst.t, lst.kc)
+ }
+ }
+ }()
+
+ return func() {
+ close(done)
+ wg.Wait()
+ }
+}
+
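+// getShardCountByWorker scans the lease table and counts, per worker, the shards it
+// currently owns. It assumes each lease item stores the shard ID under chk.LeaseKeyKey
+// and the owning worker under chk.LeaseOwnerKey as DynamoDB string attributes, matching
+// the type assertions below.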
+func (lst *LeaseStealingTest) getShardCountByWorker() map[string]int {
+ input := &dynamodb.ScanInput{
+ TableName: aws.String(lst.config.appName),
+ }
+
+ shardsByWorker := map[string]map[string]bool{}
+ scan, err := lst.dc.Scan(context.TODO(), input)
+ assert.Nil(lst.t, err)
+ if err != nil {
+ // Bail out before dereferencing a nil scan result.
+ return map[string]int{}
+ }
+ for _, result := range scan.Items {
+ if shardID, ok := result[chk.LeaseKeyKey]; !ok {
+ continue
+ } else if assignedTo, ok := result[chk.LeaseOwnerKey]; !ok {
+ continue
+ } else {
+ owner := assignedTo.(*types.AttributeValueMemberS).Value
+ if _, ok := shardsByWorker[owner]; !ok {
+ shardsByWorker[owner] = map[string]bool{}
+ }
+ shardsByWorker[owner][shardID.(*types.AttributeValueMemberS).Value] = true
+ }
+ }
+
+ shardCountByWorker := map[string]int{}
+ for worker, shards := range shardsByWorker {
+ shardCountByWorker[worker] = len(shards)
+ }
+ return shardCountByWorker
+}
+
+type LeaseStealingAssertions struct {
+ expectedLeasesForInitialWorker int
+ expectedLeasesPerWorker int
+}
+
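+// Run drives the lease stealing scenario: it starts one worker, waits for it to own the
+// expected number of leases, spawns the remaining workers, and then waits for the leases
+// to rebalance. Typical wiring (sketch, mirroring worker_lease_stealing_test.go):
+//
+//	test := NewLeaseStealingTest(t, config, newLeaseStealingWorkerFactory(t))
+//	test.Run(LeaseStealingAssertions{
+//		expectedLeasesForInitialWorker: config.numShards,
+//		expectedLeasesPerWorker:        config.numShards / config.numWorkers,
+//	})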
+func (lst *LeaseStealingTest) Run(assertions LeaseStealingAssertions) {
+ // Publish records onto the stream for the entire duration of the test
+ stop := lst.publishSomeData()
+ defer stop()
+
+ // Start worker 1
+ worker1, _ := lst.cluster.SpawnWorker()
+
+ // Wait until the above worker has all leases
+ var worker1ShardCount int
+ for i := 0; i < lst.maxRetries; i++ {
+ time.Sleep(time.Duration(lst.backOffSeconds) * time.Second)
+
+ shardCountByWorker := lst.getShardCountByWorker()
+ if shardCount, ok := shardCountByWorker[worker1]; ok && shardCount == assertions.expectedLeasesForInitialWorker {
+ worker1ShardCount = shardCount
+ break
+ }
+ }
+
+ // Assert correct number of leases
+ assert.Equal(lst.t, assertions.expectedLeasesForInitialWorker, worker1ShardCount)
+
+ // Spawn Remaining Workers
+ for i := 0; i < lst.config.numWorkers-1; i++ {
+ lst.cluster.SpawnWorker()
+ }
+
+ // Wait For Rebalance
+ var shardCountByWorker map[string]int
+ for i := 0; i < lst.maxRetries; i++ {
+ time.Sleep(time.Duration(lst.backOffSeconds) * time.Second)
+
+ shardCountByWorker = lst.getShardCountByWorker()
+
+ correctCount := true
+ for _, count := range shardCountByWorker {
+ if count != assertions.expectedLeasesPerWorker {
+ correctCount = false
+ }
+ }
+
+ if correctCount {
+ break
+ }
+ }
+
+ // Assert Rebalanced
+ assert.Greater(lst.t, len(shardCountByWorker), 0)
+ for _, count := range shardCountByWorker {
+ assert.Equal(lst.t, assertions.expectedLeasesPerWorker, count)
+ }
+
+ // Shutdown Workers
+ time.Sleep(10 * time.Second)
+ lst.cluster.Shutdown()
+}
+
+type TestWorkerFactory interface {
+ CreateWorker(workerID string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker
+ CreateKCLConfig(workerID string, config *TestClusterConfig) *cfg.KinesisClientLibConfiguration
+}
+
+type TestClusterConfig struct {
+ numShards int
+ numWorkers int
+
+ appName string
+ streamName string
+ regionName string
+ workerIDTemplate string
+}
+
+type TestCluster struct {
+ t *testing.T
+ config *TestClusterConfig
+ workerFactory TestWorkerFactory
+ workerIDs []string
+ workers map[string]*wk.Worker
+}
+
+func NewTestCluster(t *testing.T, config *TestClusterConfig, workerFactory TestWorkerFactory) *TestCluster {
+ return &TestCluster{
+ t: t,
+ config: config,
+ workerFactory: workerFactory,
+ workerIDs: make([]string, 0),
+ workers: make(map[string]*wk.Worker),
+ }
+}
+
+func (tc *TestCluster) addWorker(workerID string, config *cfg.KinesisClientLibConfiguration) *wk.Worker {
+ worker := tc.workerFactory.CreateWorker(workerID, config)
+ tc.workerIDs = append(tc.workerIDs, workerID)
+ tc.workers[workerID] = worker
+ return worker
+}
+
+func (tc *TestCluster) SpawnWorker() (string, *wk.Worker) {
+ id := len(tc.workers)
+ workerID := fmt.Sprintf(tc.config.workerIDTemplate, id)
+
+ config := tc.workerFactory.CreateKCLConfig(workerID, tc.config)
+ worker := tc.addWorker(workerID, config)
+
+ err := worker.Start()
+ assert.Nil(tc.t, err)
+ return workerID, worker
+}
+
+func (tc *TestCluster) Shutdown() {
+ for workerID, worker := range tc.workers {
+ tc.t.Logf("Shutting down worker: %v", workerID)
+ worker.Shutdown()
+ }
+}
diff --git a/test/logger_test.go b/test/logger_test.go
new file mode 100644
index 0000000..f5db877
--- /dev/null
+++ b/test/logger_test.go
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2019 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/
+
+package test
+
+import (
+ "github.com/stretchr/testify/assert"
+
+ "testing"
+
+ "github.com/sirupsen/logrus"
+ "go.uber.org/zap"
+
+ "github.com/vmware/vmware-go-kcl/logger"
+ zaplogger "github.com/vmware/vmware-go-kcl/logger/zap"
+)
+
+func TestZapLoggerWithConfig(t *testing.T) {
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: true,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+
+ log := zaplogger.NewZapLoggerWithConfig(config)
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with zap")
+ contextLogger.Infof("Zap is awesome")
+}
+
+func TestZapLogger(t *testing.T) {
+ zapLogger, err := zap.NewProduction()
+ assert.Nil(t, err)
+
+ log := zaplogger.NewZapLogger(zapLogger.Sugar())
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with zap")
+ contextLogger.Infof("Zap is awesome")
+}
+
+func TestLogrusLoggerWithConfig(t *testing.T) {
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: false,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+
+ log := logger.NewLogrusLoggerWithConfig(config)
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with logrus")
+ contextLogger.Infof("Logrus is awesome")
+}
+
+func TestLogrusLogger(t *testing.T) {
+ // adapts to Logger interface from *logrus.Logger
+ log := logger.NewLogrusLogger(logrus.StandardLogger())
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with logrus")
+ contextLogger.Infof("Logrus is awesome")
+}
+
+func TestLogrusLoggerWithFieldsAtInit(t *testing.T) {
+ // adapts to Logger interface from *logrus.Entry
+ fieldLogger := logrus.StandardLogger().WithField("key0", "value0")
+ log := logger.NewLogrusLogger(fieldLogger)
+
+ contextLogger := log.WithFields(logger.Fields{"key1": "value1"})
+ contextLogger.Debugf("Starting with logrus")
+ contextLogger.Infof("Structured logging is awesome")
+}
diff --git a/test/record_processor_test.go b/test/record_processor_test.go
new file mode 100644
index 0000000..ff4fef3
--- /dev/null
+++ b/test/record_processor_test.go
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2020 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// Package test contains integration-style tests for the KCL worker.
+package test
+
+import (
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/stretchr/testify/assert"
+
+ kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
+)
+
+// recordProcessorFactory creates the IRecordProcessorFactory used by the tests.
+func recordProcessorFactory(t *testing.T) kc.IRecordProcessorFactory {
+ return &dumpRecordProcessorFactory{t: t}
+}
+
+// dumpRecordProcessorFactory builds a simple record processor that dumps everything it receives.
+type dumpRecordProcessorFactory struct {
+ t *testing.T
+}
+
+func (d *dumpRecordProcessorFactory) CreateProcessor() kc.IRecordProcessor {
+ return &dumpRecordProcessor{
+ t: d.t,
+ }
+}
+
+// dumpRecordProcessor is a record processor that prints out all data from each record.
+type dumpRecordProcessor struct {
+ t *testing.T
+ count int
+}
+
+func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) {
+ dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.ToString(input.ExtendedSequenceNumber.SequenceNumber))
+ shardID = input.ShardId
+ dd.count = 0
+}
+
+func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) {
+ dd.t.Log("Processing Records...")
+
+ // don't process empty record
+ if len(input.Records) == 0 {
+ return
+ }
+
+ for _, v := range input.Records {
+ dd.t.Logf("Record = %s", v.Data)
+ assert.Equal(dd.t, specstr, string(v.Data))
+ dd.count++
+ }
+
+ // Checkpoint after processing this batch. In particular, for de-aggregated KPL
+ // records, checkpointing has to happen at the end of the batch because de-aggregated
+ // records share the same sequence number.
+ lastRecordSequenceNumber := input.Records[len(input.Records)-1].SequenceNumber
+ // Calculate the time taken from polling the records to delivering them to the record processor for this batch.
+ diff := input.CacheExitTime.Sub(*input.CacheEntryTime)
+ dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v, KCLProcessTime = %v", lastRecordSequenceNumber, input.MillisBehindLatest, diff)
+ _ = input.Checkpointer.Checkpoint(lastRecordSequenceNumber)
+}
+
+func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) {
+ dd.t.Logf("Shutdown Reason: %v", aws.ToString(kc.ShutdownReasonMessage(input.ShutdownReason)))
+ dd.t.Logf("Processed Record Count = %d", dd.count)
+
+ // When the value of {@link ShutdownInput#getShutdownReason()} is
+ // {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you
+ // checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress.
+ if input.ShutdownReason == kc.TERMINATE {
+ _ = input.Checkpointer.Checkpoint(nil)
+ }
+
+ assert.True(dd.t, dd.count > 0)
+}
diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go
new file mode 100644
index 0000000..5bd061a
--- /dev/null
+++ b/test/record_publisher_test.go
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2020 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package test
+
+import (
+ "context"
+ "crypto/md5"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awsConfig "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis/types"
+ "github.com/golang/protobuf/proto"
+
+ "github.com/vmware/vmware-go-kcl/clientlibrary/utils"
+ rec "github.com/vmware/vmware-go-kcl/internal/records"
+)
+
+const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}`
+
+// NewKinesisClient creates a Kinesis client for the given region, endpoint, and static credentials.
+func NewKinesisClient(t *testing.T, regionName, endpoint string, creds *credentials.StaticCredentialsProvider) *kinesis.Client {
+ // create session for Kinesis
+ t.Logf("Creating Kinesis client")
+
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: endpoint,
+ SigningRegion: regionName,
+ }, nil
+ })
+
+ cfg, err := awsConfig.LoadDefaultConfig(
+ context.TODO(),
+ awsConfig.WithRegion(regionName),
+ awsConfig.WithCredentialsProvider(
+ credentials.NewStaticCredentialsProvider(
+ creds.Value.AccessKeyID,
+ creds.Value.SecretAccessKey,
+ creds.Value.SessionToken)),
+ awsConfig.WithEndpointResolver(resolver),
+ awsConfig.WithRetryer(func() aws.Retryer {
+ return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff)
+ }),
+ )
+
+ if err != nil {
+ // no need to move forward
+ t.Fatalf("Failed in loading Kinesis default config for creating Worker: %+v", err)
+ }
+
+ return kinesis.NewFromConfig(cfg)
+}
+
+// NewDynamoDBClient creates a DynamoDB client for the given region, endpoint, and static credentials.
+func NewDynamoDBClient(t *testing.T, regionName, endpoint string, creds *credentials.StaticCredentialsProvider) *dynamodb.Client {
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: endpoint,
+ SigningRegion: regionName,
+ }, nil
+ })
+
+ cfg, err := awsConfig.LoadDefaultConfig(
+ context.TODO(),
+ awsConfig.WithRegion(regionName),
+ awsConfig.WithCredentialsProvider(
+ credentials.NewStaticCredentialsProvider(
+ creds.Value.AccessKeyID,
+ creds.Value.SecretAccessKey,
+ creds.Value.SessionToken)),
+ awsConfig.WithEndpointResolver(resolver),
+ awsConfig.WithRetryer(func() aws.Retryer {
+ return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff)
+ }),
+ )
+
+ if err != nil {
+ t.Fatalf("unable to load SDK config, %v", err)
+ }
+
+ return dynamodb.NewFromConfig(cfg)
+}
+
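+// continuouslyPublishSomeData lists all shards once and then publishes records to the
+// stream every 500ms until the returned stop function is called, e.g.:
+//
+//	stop := continuouslyPublishSomeData(t, kc)
+//	defer stop()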
+func continuouslyPublishSomeData(t *testing.T, kc *kinesis.Client) func() {
+ var shards []types.Shard
+ var nextToken *string
+ for {
+ out, err := kc.ListShards(context.TODO(), &kinesis.ListShardsInput{
+ StreamName: aws.String(streamName),
+ NextToken: nextToken,
+ })
+ if err != nil {
+ // Without the shard list there is nothing to publish to; fail immediately.
+ t.Fatalf("Error in ListShards. %+v", err)
+ }
+
+ shards = append(shards, out.Shards...)
+ if out.NextToken == nil {
+ break
+ }
+ nextToken = out.NextToken
+ }
+
+ done := make(chan int)
+ wg := &sync.WaitGroup{}
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ticker := time.NewTicker(500 * time.Millisecond)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-done:
+ return
+ case <-ticker.C:
+ publishToAllShards(t, kc, shards)
+ publishSomeData(t, kc)
+ }
+ }
+ }()
+
+ return func() {
+ close(done)
+ wg.Wait()
+ }
+}
+
+func publishToAllShards(t *testing.T, kc *kinesis.Client, shards []types.Shard) {
+ // Put records to all shards
+ for i := 0; i < 10; i++ {
+ for _, shard := range shards {
+ publishRecord(t, kc, shard.HashKeyRange.StartingHashKey)
+ }
+ }
+}
+
+// publishSomeData puts records into the Kinesis stream via the PutRecord and PutRecords APIs and as KPL aggregate records.
+func publishSomeData(t *testing.T, kc *kinesis.Client) {
+ // Put some data into stream.
+ t.Log("Putting data into stream using PutRecord API...")
+ for i := 0; i < 50; i++ {
+ publishRecord(t, kc, nil)
+ }
+ t.Log("Done putting data into stream using PutRecord API.")
+
+ // Put some data into stream using PutRecords API
+ t.Log("Putting data into stream using PutRecords API...")
+ for i := 0; i < 10; i++ {
+ publishRecords(t, kc)
+ }
+ t.Log("Done putting data into stream using PutRecords API.")
+
+ // Put some data into stream using KPL Aggregate Record format
+ t.Log("Putting data into stream using KPL Aggregate Record ...")
+ for i := 0; i < 10; i++ {
+ publishAggregateRecord(t, kc)
+ }
+ t.Log("Done putting data into stream using KPL Aggregate Record.")
+}
+
+// publishRecord puts a single record into the Kinesis stream using the PutRecord API.
+func publishRecord(t *testing.T, kc *kinesis.Client, hashKey *string) {
+ input := &kinesis.PutRecordInput{
+ Data: []byte(specstr),
+ StreamName: aws.String(streamName),
+ PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)),
+ }
+ if hashKey != nil {
+ input.ExplicitHashKey = hashKey
+ }
+ // Use random string as partition key to ensure even distribution across shards
+ _, err := kc.PutRecord(context.TODO(), input)
+
+ if err != nil {
+ t.Errorf("Error in PutRecord. %+v", err)
+ }
+}
+
+// publishRecords puts a batch of records into the Kinesis stream using the PutRecords API.
+func publishRecords(t *testing.T, kc *kinesis.Client) {
+ // Use random string as partition key to ensure even distribution across shards
+ records := make([]types.PutRecordsRequestEntry, 5)
+
+ for i := 0; i < 5; i++ {
+ record := types.PutRecordsRequestEntry{
+ Data: []byte(specstr),
+ PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)),
+ }
+ records[i] = record
+ }
+
+ _, err := kc.PutRecords(context.TODO(), &kinesis.PutRecordsInput{
+ Records: records,
+ StreamName: aws.String(streamName),
+ })
+
+ if err != nil {
+ t.Errorf("Error in PutRecords. %+v", err)
+ }
+}
+
+// publishAggregateRecord puts a KPL-style aggregate record into the Kinesis stream using the PutRecord API.
+func publishAggregateRecord(t *testing.T, kc *kinesis.Client) {
+ data := generateAggregateRecord(5, specstr)
+ // Use random string as partition key to ensure even distribution across shards
+ _, err := kc.PutRecord(context.TODO(), &kinesis.PutRecordInput{
+ Data: data,
+ StreamName: aws.String(streamName),
+ PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)),
+ })
+
+ if err != nil {
+ t.Errorf("Error in PutRecord. %+v", err)
+ }
+}
+
+// generateAggregateRecord generates an aggregate record in the correct AWS-specified format used by KPL.
+// https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md
+// copy from: https://github.com/awslabs/kinesis-aggregation/blob/master/go/deaggregator/deaggregator_test.go
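+// Layout: the 4-byte KPL magic header (0xF3 0x89 0x9A 0xC2), followed by the
+// protobuf-encoded AggregatedRecord, followed by the 16-byte MD5 checksum of that
+// protobuf payload.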
+func generateAggregateRecord(numRecords int, content string) []byte {
+ aggr := &rec.AggregatedRecord{}
+ // Start with the magic header
+ aggRecord := []byte("\xf3\x89\x9a\xc2")
+ partKeyTable := make([]string, 0)
+
+ // Create proto record with numRecords length
+ for i := 0; i < numRecords; i++ {
+ partKey := uint64(i)
+ hashKey := uint64(i) * 10
+ r := &rec.Record{
+ PartitionKeyIndex: &partKey,
+ ExplicitHashKeyIndex: &hashKey,
+ Data: []byte(content),
+ Tags: make([]*rec.Tag, 0),
+ }
+
+ aggr.Records = append(aggr.Records, r)
+ partKeyVal := fmt.Sprint(i)
+ partKeyTable = append(partKeyTable, partKeyVal)
+ }
+
+ aggr.PartitionKeyTable = partKeyTable
+ // Marshal to protobuf record, create md5 sum from proto record
+ // and append both to aggRecord with magic header
+ data, _ := proto.Marshal(aggr)
+ md5Hash := md5.Sum(data)
+ aggRecord = append(aggRecord, data...)
+ aggRecord = append(aggRecord, md5Hash[:]...)
+ return aggRecord
+}
diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go
new file mode 100644
index 0000000..a513b56
--- /dev/null
+++ b/test/worker_custom_test.go
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package test
+
+import (
+ "context"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/service/kinesis"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
+ wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
+)
+
+func TestWorkerInjectCheckpointer(t *testing.T) {
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000)
+ log.SetOutput(os.Stdout)
+ log.SetLevel(log.DebugLevel)
+
+ assert.Equal(t, regionName, kclConfig.RegionName)
+ assert.Equal(t, streamName, kclConfig.StreamName)
+
+ // configure cloudwatch as metrics system
+ kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))
+
+ // Put some data into stream.
+ kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials)
+ // publishSomeData(t, kc)
+ stop := continuouslyPublishSomeData(t, kc)
+ defer stop()
+
+ // custom checkpointer or a mock checkpointer.
+ checkpointer := chk.NewDynamoCheckpoint(kclConfig)
+
+ // Inject a custom checkpointer into the worker.
+ worker := wk.NewWorker(recordProcessorFactory(t), kclConfig).
+ WithCheckpointer(checkpointer)
+
+ err := worker.Start()
+ assert.Nil(t, err)
+
+ // wait a few seconds before shutdown processing
+ time.Sleep(30 * time.Second)
+ worker.Shutdown()
+
+ // verify the checkpointer after graceful shutdown
+ status := &par.ShardStatus{
+ ID: shardID,
+ Mux: &sync.RWMutex{},
+ }
+
+ _ = checkpointer.FetchCheckpoint(status)
+
+ // the checkpoint value should still be present after a graceful shutdown
+ assert.NotEmpty(t, status.Checkpoint)
+
+ // Only the lease owner has been wiped out
+ assert.Equal(t, "", status.GetLeaseOwner())
+
+}
+
+func TestWorkerInjectKinesis(t *testing.T) {
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000)
+
+ log.SetOutput(os.Stdout)
+ log.SetLevel(log.DebugLevel)
+
+ assert.Equal(t, regionName, kclConfig.RegionName)
+ assert.Equal(t, streamName, kclConfig.StreamName)
+
+ // configure cloudwatch as metrics system
+ kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))
+
+ defaultConfig, err := config.LoadDefaultConfig(
+ context.TODO(),
+ config.WithRegion(regionName),
+ )
+
+ assert.Nil(t, err)
+ kc := kinesis.NewFromConfig(defaultConfig)
+
+ // Put some data into stream.
+ // publishSomeData(t, kc)
+ stop := continuouslyPublishSomeData(t, kc)
+ defer stop()
+
+ // Inject a custom checkpointer into the worker.
+ worker := wk.NewWorker(recordProcessorFactory(t), kclConfig).
+ WithKinesis(kc)
+
+ err = worker.Start()
+ assert.Nil(t, err)
+
+ // wait a few seconds before shutdown processing
+ time.Sleep(30 * time.Second)
+ worker.Shutdown()
+}
+
+func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) {
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000)
+
+ log.SetOutput(os.Stdout)
+ log.SetLevel(log.DebugLevel)
+
+ assert.Equal(t, regionName, kclConfig.RegionName)
+ assert.Equal(t, streamName, kclConfig.StreamName)
+
+ // configure cloudwatch as metrics system
+ kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))
+
+ // create custom Kinesis
+ defaultConfig, err := config.LoadDefaultConfig(
+ context.TODO(),
+ config.WithRegion(regionName),
+ )
+
+ assert.Nil(t, err)
+ kc := kinesis.NewFromConfig(defaultConfig)
+
+ // Put some data into stream.
+ // publishSomeData(t, kc)
+ stop := continuouslyPublishSomeData(t, kc)
+ defer stop()
+
+ // custom checkpointer or a mock checkpointer.
+ checkpointer := chk.NewDynamoCheckpoint(kclConfig)
+
+ // Inject both custom checkpointer and kinesis into the worker.
+ worker := wk.NewWorker(recordProcessorFactory(t), kclConfig).
+ WithKinesis(kc).
+ WithCheckpointer(checkpointer)
+
+ err = worker.Start()
+ assert.Nil(t, err)
+
+ // wait a few seconds before shutdown processing
+ time.Sleep(30 * time.Second)
+ worker.Shutdown()
+}
diff --git a/test/worker_lease_stealing_test.go b/test/worker_lease_stealing_test.go
new file mode 100644
index 0000000..3742a8f
--- /dev/null
+++ b/test/worker_lease_stealing_test.go
@@ -0,0 +1,127 @@
+package test
+
+import (
+ "testing"
+
+ chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
+ cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
+ "github.com/vmware/vmware-go-kcl/logger"
+)
+
+func TestLeaseStealing(t *testing.T) {
+ config := &TestClusterConfig{
+ numShards: 4,
+ numWorkers: 2,
+ appName: appName,
+ streamName: streamName,
+ regionName: regionName,
+ workerIDTemplate: workerID + "-%v",
+ }
+ test := NewLeaseStealingTest(t, config, newLeaseStealingWorkerFactory(t))
+ test.Run(LeaseStealingAssertions{
+ expectedLeasesForInitialWorker: config.numShards,
+ expectedLeasesPerWorker: config.numShards / config.numWorkers,
+ })
+}
+
+type leaseStealingWorkerFactory struct {
+ t *testing.T
+}
+
+func newLeaseStealingWorkerFactory(t *testing.T) *leaseStealingWorkerFactory {
+ return &leaseStealingWorkerFactory{t}
+}
+
+func (wf *leaseStealingWorkerFactory) CreateKCLConfig(workerID string, config *TestClusterConfig) *cfg.KinesisClientLibConfiguration {
+ log := logger.NewLogrusLoggerWithConfig(logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Error,
+ ConsoleJSONFormat: false,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ })
+
+ log = log.WithFields(logger.Fields{"worker": workerID})
+
+ return cfg.NewKinesisClientLibConfig(config.appName, config.streamName, config.regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(10000).
+ WithLeaseStealing(true).
+ WithLogger(log)
+}
+
+func (wf *leaseStealingWorkerFactory) CreateWorker(_ string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker {
+ worker := wk.NewWorker(recordProcessorFactory(wf.t), kclConfig)
+ return worker
+}
+
+func TestLeaseStealingInjectCheckpointer(t *testing.T) {
+ config := &TestClusterConfig{
+ numShards: 4,
+ numWorkers: 2,
+ appName: appName,
+ streamName: streamName,
+ regionName: regionName,
+ workerIDTemplate: workerID + "-%v",
+ }
+ test := NewLeaseStealingTest(t, config, newleaseStealingWorkerFactoryCustomChk(t))
+ test.Run(LeaseStealingAssertions{
+ expectedLeasesForInitialWorker: config.numShards,
+ expectedLeasesPerWorker: config.numShards / config.numWorkers,
+ })
+}
+
+type leaseStealingWorkerFactoryCustom struct {
+ *leaseStealingWorkerFactory
+}
+
+func newleaseStealingWorkerFactoryCustomChk(t *testing.T) *leaseStealingWorkerFactoryCustom {
+ return &leaseStealingWorkerFactoryCustom{
+ newLeaseStealingWorkerFactory(t),
+ }
+}
+
+func (wfc *leaseStealingWorkerFactoryCustom) CreateWorker(workerID string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker {
+ worker := wfc.leaseStealingWorkerFactory.CreateWorker(workerID, kclConfig)
+ checkpointer := chk.NewDynamoCheckpoint(kclConfig)
+ return worker.WithCheckpointer(checkpointer)
+}
+
+func TestLeaseStealingWithMaxLeasesForWorker(t *testing.T) {
+ config := &TestClusterConfig{
+ numShards: 4,
+ numWorkers: 2,
+ appName: appName,
+ streamName: streamName,
+ regionName: regionName,
+ workerIDTemplate: workerID + "-%v",
+ }
+ test := NewLeaseStealingTest(t, config, newLeaseStealingWorkerFactoryMaxLeases(t, config.numShards-1))
+ test.Run(LeaseStealingAssertions{
+ expectedLeasesForInitialWorker: config.numShards - 1,
+ expectedLeasesPerWorker: 2,
+ })
+}
+
+type leaseStealingWorkerFactoryMaxLeases struct {
+ maxLeases int
+ *leaseStealingWorkerFactory
+}
+
+func newLeaseStealingWorkerFactoryMaxLeases(t *testing.T, maxLeases int) *leaseStealingWorkerFactoryMaxLeases {
+ return &leaseStealingWorkerFactoryMaxLeases{
+ maxLeases,
+ newLeaseStealingWorkerFactory(t),
+ }
+}
+
+func (wfm *leaseStealingWorkerFactoryMaxLeases) CreateKCLConfig(workerID string, config *TestClusterConfig) *cfg.KinesisClientLibConfiguration {
+ kclConfig := wfm.leaseStealingWorkerFactory.CreateKCLConfig(workerID, config)
+ kclConfig.WithMaxLeasesForWorker(wfm.maxLeases)
+ return kclConfig
+}
diff --git a/test/worker_test.go b/test/worker_test.go
new file mode 100644
index 0000000..935ed45
--- /dev/null
+++ b/test/worker_test.go
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package test
+
+import (
+ "net/http"
+ "os"
+ "os/signal"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/prometheus/common/expfmt"
+ "github.com/stretchr/testify/assert"
+
+ cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/cloudwatch"
+ "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/prometheus"
+ wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
+ "github.com/vmware/vmware-go-kcl/logger"
+ zaplogger "github.com/vmware/vmware-go-kcl/logger/zap"
+)
+
+const (
+ appName = "appName"
+ streamName = "kcl-test"
+ regionName = "us-west-2"
+ workerID = "test-worker"
+ consumerName = "enhanced-fan-out-consumer"
+ kinesisEndpoint = "https://kinesis.eu-west-1.amazonaws.com"
+ dynamoEndpoint = "https://dynamodb.eu-west-1.amazonaws.com"
+)
+
+const metricsSystem = "cloudwatch"
+
+var shardID string
+
+func TestWorker(t *testing.T) {
+ // At a minimum, use the standard logrus logger:
+ // log := logger.NewLogrusLogger(logrus.StandardLogger())
+ //
+ // For precise control over logging, use a logger built from a configuration.
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Error,
+ ConsoleJSONFormat: false,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+ // Use logrus logger
+ log := logger.NewLogrusLoggerWithConfig(config)
+
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(8).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLogger(log).
+ WithKinesisEndpoint(kinesisEndpoint)
+
+ runTest(kclConfig, false, t)
+}
+
+func TestWorkerWithTimestamp(t *testing.T) {
+ // For precise control over logging, use a logger built from a configuration.
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: false,
+ }
+ // Use logrus logger
+ log := logger.NewLogrusLoggerWithConfig(config)
+
+ ts := time.Now().Add(time.Second * 5)
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithTimestampAtInitialPositionInStream(&ts).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLogger(log).
+ WithKinesisEndpoint(kinesisEndpoint)
+
+ runTest(kclConfig, false, t)
+}
+
+func TestWorkerWithSigInt(t *testing.T) {
+ // At a minimum, use the standard zap logger:
+ //zapLogger, err := zap.NewProduction()
+ //assert.Nil(t, err)
+ //log := zaplogger.NewZapLogger(zapLogger.Sugar())
+ //
+ // For precise control over logging, use a logger built from a configuration.
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: true,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+ // use zap logger
+ log := zaplogger.NewZapLoggerWithConfig(config)
+
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLogger(log).
+ WithKinesisEndpoint(kinesisEndpoint)
+
+ runTest(kclConfig, true, t)
+}
+
+func TestWorkerStatic(t *testing.T) {
+ //t.Skip("Need to provide actual credentials")
+
+ // Fill in the credentials for accessing Kinesis and DynamoDB.
+ // Note: use empty string as SessionToken for long-term credentials.
+ kinesisCreds := credentials.NewStaticCredentialsProvider("", "", "")
+ dynamoCreds := credentials.NewStaticCredentialsProvider("", "", "")
+
+ kclConfig := cfg.NewKinesisClientLibConfigWithCredentials(appName, streamName, regionName, workerID, &kinesisCreds, &dynamoCreds).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithKinesisEndpoint(kinesisEndpoint).
+ WithDynamoDBEndpoint(dynamoEndpoint)
+
+ runTest(kclConfig, false, t)
+}
+
+func TestWorkerAssumeRole(t *testing.T) {
+ t.Skip("Need to provide actual roleARN")
+
+ // Initial credentials are loaded from the SDK's default credential chain, such as
+ // the environment, shared credentials (~/.aws/credentials), or an EC2 instance role.
+ // These credentials are used to make the STS AssumeRole API call.
+ //sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ //kinesisCreds := stscreds.NewAssumeRoleProvider(sess, "arn:aws:iam::*:role/kcl-test-publisher")
+ kinesisCreds := credentials.NewStaticCredentialsProvider("", "", "")
+ dynamoCreds := credentials.NewStaticCredentialsProvider("", "", "")
+
+ kclConfig := cfg.NewKinesisClientLibConfigWithCredentials(appName, streamName, regionName, workerID, &kinesisCreds, &dynamoCreds).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithKinesisEndpoint(kinesisEndpoint).
+ WithDynamoDBEndpoint(dynamoEndpoint)
+
+ runTest(kclConfig, false, t)
+}
+
+func TestEnhancedFanOutConsumer(t *testing.T) {
+ // At a minimum, use the standard logrus logger:
+ // log := logger.NewLogrusLogger(logrus.StandardLogger())
+ //
+ // For precise control over logging, use a logger built from a configuration.
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: false,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+ // Use logrus logger
+ log := logger.NewLogrusLoggerWithConfig(config)
+
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithEnhancedFanOutConsumerName(consumerName).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLogger(log)
+
+ runTest(kclConfig, false, t)
+}
+
+func TestEnhancedFanOutConsumerDefaultConsumerName(t *testing.T) {
+ // At a minimum, use the standard logrus logger:
+ // log := logger.NewLogrusLogger(logrus.StandardLogger())
+ //
+ // For precise control over logging, use a logger built from a configuration.
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: false,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+ // Use logrus logger
+ log := logger.NewLogrusLoggerWithConfig(config)
+
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithEnhancedFanOutConsumer(true).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLogger(log)
+
+ runTest(kclConfig, false, t)
+}
+
+func TestEnhancedFanOutConsumerARN(t *testing.T) {
+ t.Skip("Need to provide actual consumerARN")
+
+ consumerARN := "arn:aws:kinesis:*:stream/kcl-test/consumer/fanout-poc-consumer-test:*"
+ // At a minimum, use the standard logrus logger:
+ // log := logger.NewLogrusLogger(logrus.StandardLogger())
+ //
+ // For precise control over logging, use a logger built from a configuration.
+ config := logger.Configuration{
+ EnableConsole: true,
+ ConsoleLevel: logger.Debug,
+ ConsoleJSONFormat: false,
+ EnableFile: true,
+ FileLevel: logger.Info,
+ FileJSONFormat: true,
+ Filename: "log.log",
+ }
+ // Use logrus logger
+ log := logger.NewLogrusLoggerWithConfig(config)
+
+ kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID).
+ WithInitialPositionInStream(cfg.LATEST).
+ WithEnhancedFanOutConsumerARN(consumerARN).
+ WithMaxRecords(10).
+ WithMaxLeasesForWorker(1).
+ WithShardSyncIntervalMillis(5000).
+ WithFailoverTimeMillis(300000).
+ WithLogger(log)
+
+ runTest(kclConfig, false, t)
+}
+
+func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *testing.T) {
+ assert.Equal(t, regionName, kclConfig.RegionName)
+ assert.Equal(t, streamName, kclConfig.StreamName)
+
+ // configure cloudwatch as metrics system
+ kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))
+
+ // Put some data into stream.
+ kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials)
+ // publishSomeData(t, kc)
+ stop := continuouslyPublishSomeData(t, kc)
+ defer stop()
+
+ worker := wk.NewWorker(recordProcessorFactory(t), kclConfig)
+ err := worker.Start()
+ assert.Nil(t, err)
+
+ sigs := make(chan os.Signal, 1)
+ signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+
+ // Signal processing.
+ go func() {
+ sig := <-sigs
+ t.Logf("Received signal %s. Exiting", sig)
+ worker.Shutdown()
+ // some other processing before exit.
+ //os.Exit(0)
+ }()
+
+ if triggersig {
+ t.Log("Trigger signal SIGINT")
+ p, _ := os.FindProcess(os.Getpid())
+ _ = p.Signal(os.Interrupt)
+ }
+
+ // wait a few seconds before shutdown processing
+ time.Sleep(30 * time.Second)
+
+ switch metricsSystem {
+ case "prometheus":
+ res, err := http.Get("http://localhost:8080/metrics")
+ if err != nil {
+ t.Fatalf("Error scraping Prometheus endpoint %s", err)
+ }
+
+ var parser expfmt.TextParser
+ parsed, err := parser.TextToMetricFamilies(res.Body)
+ _ = res.Body.Close()
+ if err != nil {
+ t.Errorf("Error reading monitoring response %s", err)
+ }
+
+ t.Logf("Prometheus: %+v", parsed)
+ }
+
+ t.Log("Calling normal shutdown at the end of application.")
+ worker.Shutdown()
+}
+
+// configure different metrics system
+func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) metrics.MonitoringService {
+
+ if service == "cloudwatch" {
+ return cloudwatch.NewMonitoringServiceWithOptions(kclConfig.RegionName,
+ kclConfig.KinesisCredentials,
+ kclConfig.Logger,
+ cloudwatch.DefaultCloudwatchMetricsBufferDuration)
+ }
+
+ if service == "prometheus" {
+ return prometheus.NewMonitoringService(":8080", regionName, kclConfig.Logger)
+ }
+
+ return nil
+}
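+
+// To exercise the Prometheus path instead of CloudWatch (sketch), switch the constant
+// above; getMetricsConfig then returns the Prometheus monitoring service listening on
+// :8080, and runTest scrapes http://localhost:8080/metrics with expfmt.TextParser.
+//
+//	const metricsSystem = "prometheus"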